diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 1f47165d91f..ae24929ffa2 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -41,3 +41,4 @@ BWC_VERSION: - "7.2.1" - "7.3.0" - "7.3.1" + - "7.3.2" diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml index a865f33710b..0bf7574bf23 100644 --- a/.ci/matrix-runtime-javas.yml +++ b/.ci/matrix-runtime-javas.yml @@ -12,6 +12,7 @@ ES_RUNTIME_JAVA: - java12 - openjdk12 - openjdk13 + - openjdk14 - zulu8 - zulu11 - zulu12 diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java index ef839016314..799283ab779 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/DistroTestPlugin.java @@ -95,10 +95,12 @@ public class DistroTestPlugin implements Plugin<Project> { TaskProvider<Copy> copyUpgradeTask = configureCopyUpgradeTask(project, upgradeVersion, upgradeDir); TaskProvider<Copy> copyPluginsTask = configureCopyPluginsTask(project, pluginsDir); - Map<String, TaskProvider<?>> batsTests = new HashMap<>(); + TaskProvider<Task> destructiveDistroTest = project.getTasks().register("destructiveDistroTest"); for (ElasticsearchDistribution distribution : distributions) { - configureDistroTest(project, distribution); + TaskProvider<?> destructiveTask = configureDistroTest(project, distribution); + destructiveDistroTest.configure(t -> t.dependsOn(destructiveTask)); } + Map<String, TaskProvider<?>> batsTests = new HashMap<>(); batsTests.put("bats oss", configureBatsTest(project, "oss", distributionsDir, copyDistributionsTask)); batsTests.put("bats default", configureBatsTest(project, "default", distributionsDir, copyDistributionsTask)); configureBatsTest(project, "plugins",distributionsDir, copyDistributionsTask, copyPluginsTask).configure(t -> @@ -126,7 +128,6 @@ public class DistroTestPlugin implements Plugin<Project> { } } - batsTests.forEach((desc, task) -> { configureVMWrapperTask(vmProject, desc, task.getName(), vmDependencies).configure(t -> { t.setProgressHandler(new BatsProgressLogger(project.getLogger())); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index f3a49f06459..d3b2ea466f4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -20,6 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; @@ -170,6 +172,35 @@ public final class SnapshotClient { VerifyRepositoryResponse::fromXContent, listener, emptySet()); } + /** + * Cleans up a snapshot repository. + * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore + * API on elastic.co</a> + * @param cleanupRepositoryRequest the request + * @param options the request options (e.g.
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public CleanupRepositoryResponse cleanupRepository(CleanupRepositoryRequest cleanupRepositoryRequest, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(cleanupRepositoryRequest, SnapshotRequestConverters::cleanupRepository, + options, CleanupRepositoryResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously cleans up a snapshot repository. + * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore + * API on elastic.co</a> + * @param cleanupRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void cleanupRepositoryAsync(CleanupRepositoryRequest cleanupRepositoryRequest, RequestOptions options, + ActionListener<CleanupRepositoryResponse> listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(cleanupRepositoryRequest, SnapshotRequestConverters::cleanupRepository, + options, CleanupRepositoryResponse::fromXContent, listener, emptySet()); + } + /** * Creates a snapshot. * <p>
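A minimal end-to-end sketch of the cleanup API introduced above, assuming an already-configured RestHighLevelClient named `client` and an existing snapshot repository named `my_backup` (both names are illustrative, not part of the patch):

import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
import org.elasticsearch.client.RequestOptions;

// Ask the cluster to remove unreferenced data from the repository.
CleanupRepositoryRequest request = new CleanupRepositoryRequest("my_backup");
CleanupRepositoryResponse response = client.snapshot().cleanupRepository(request, RequestOptions.DEFAULT);
// The result reports how much stale data the cleanup deleted.
long deletedBytes = response.result().bytes();
long deletedBlobs = response.result().blobs();

The asynchronous variant, cleanupRepositoryAsync, takes the same request plus an ActionListener<CleanupRepositoryResponse>, mirroring the other SnapshotClient methods.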
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java index f106a7992e6..3d033bc2890 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotRequestConverters.java @@ -23,6 +23,7 @@ import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -94,6 +95,20 @@ final class SnapshotRequestConverters { return request; } + static Request cleanupRepository(CleanupRepositoryRequest cleanupRepositoryRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_snapshot") + .addPathPart(cleanupRepositoryRequest.name()) + .addPathPartAsIs("_cleanup") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + + RequestConverters.Params parameters = new RequestConverters.Params(); + parameters.withMasterTimeout(cleanupRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(cleanupRepositoryRequest.timeout()); + request.addParameters(parameters.asMap()); + return request; + } + static Request createSnapshot(CreateSnapshotRequest createSnapshotRequest) throws IOException { String endpoint = new RequestConverters.EndpointBuilder().addPathPart("_snapshot") .addPathPart(createSnapshotRequest.repository()) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EvaluateDataFrameRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EvaluateDataFrameRequest.java index 2e3bbb17050..cfb5eeb6ef3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EvaluateDataFrameRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/EvaluateDataFrameRequest.java @@ -21,7 +21,9 @@ package org.elasticsearch.client.ml; import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; +import org.elasticsearch.client.ml.dataframe.QueryConfig; import org.elasticsearch.client.ml.dataframe.evaluation.Evaluation; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -37,20 +39,25 @@ import java.util.Objects; import java.util.Optional; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; public class EvaluateDataFrameRequest implements ToXContentObject, Validatable { private static final ParseField INDEX = new ParseField("index"); + private static final ParseField QUERY = new ParseField("query"); private static final ParseField EVALUATION = new ParseField("evaluation"); @SuppressWarnings("unchecked") private static final 
ConstructingObjectParser<EvaluateDataFrameRequest, Void> PARSER = new ConstructingObjectParser<>( - "evaluate_data_frame_request", true, args -> new EvaluateDataFrameRequest((List<String>) args[0], (Evaluation) args[1])); + "evaluate_data_frame_request", + true, + args -> new EvaluateDataFrameRequest((List<String>) args[0], (QueryConfig) args[1], (Evaluation) args[2])); static { PARSER.declareStringArray(constructorArg(), INDEX); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> QueryConfig.fromXContent(p), QUERY); PARSER.declareObject(constructorArg(), (p, c) -> parseEvaluation(p), EVALUATION); } @@ -67,14 +74,16 @@ public class EvaluateDataFrameRequest implements ToXContentObject, Validatable { } private List<String> indices; + private QueryConfig queryConfig; private Evaluation evaluation; - public EvaluateDataFrameRequest(String index, Evaluation evaluation) { - this(Arrays.asList(index), evaluation); + public EvaluateDataFrameRequest(String index, @Nullable QueryConfig queryConfig, Evaluation evaluation) { + this(Arrays.asList(index), queryConfig, evaluation); } - public EvaluateDataFrameRequest(List<String> indices, Evaluation evaluation) { + public EvaluateDataFrameRequest(List<String> indices, @Nullable QueryConfig queryConfig, Evaluation evaluation) { setIndices(indices); + setQueryConfig(queryConfig); setEvaluation(evaluation); } @@ -87,6 +96,14 @@ public class EvaluateDataFrameRequest implements ToXContentObject, Validatable { this.indices = new ArrayList<>(indices); } + public QueryConfig getQueryConfig() { + return queryConfig; + } + + public final void setQueryConfig(QueryConfig queryConfig) { + this.queryConfig = queryConfig; + } + public Evaluation getEvaluation() { return evaluation; } @@ -111,18 +128,22 @@ public class EvaluateDataFrameRequest implements ToXContentObject, Validatable { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder - .startObject() - .array(INDEX.getPreferredName(), indices.toArray()) - .startObject(EVALUATION.getPreferredName()) - .field(evaluation.getName(), evaluation) - .endObject() + builder.startObject(); + builder.array(INDEX.getPreferredName(), indices.toArray()); + if (queryConfig != null) { + builder.field(QUERY.getPreferredName(), queryConfig.getQuery()); + } + builder + .startObject(EVALUATION.getPreferredName()) + .field(evaluation.getName(), evaluation) .endObject(); + builder.endObject(); + return builder; } @Override public int hashCode() { - return Objects.hash(indices, evaluation); + return Objects.hash(indices, queryConfig, evaluation); } @Override @@ -131,6 +152,7 @@ if (o == null || getClass() != o.getClass()) return false; EvaluateDataFrameRequest that = (EvaluateDataFrameRequest) o; return Objects.equals(indices, that.indices) + && Objects.equals(queryConfig, that.queryConfig) && Objects.equals(evaluation, that.evaluation); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index c43aadd6904..e0fac7bb09a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteModelSnapshotRequest;
import org.elasticsearch.client.ml.EvaluateDataFrameRequest; +import org.elasticsearch.client.ml.EvaluateDataFrameRequestTests; import org.elasticsearch.client.ml.FindFileStructureRequest; import org.elasticsearch.client.ml.FindFileStructureRequestTests; import org.elasticsearch.client.ml.FlushJobRequest; @@ -85,9 +86,6 @@ import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests; import org.elasticsearch.client.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.client.ml.dataframe.MlDataFrameAnalysisNamedXContentProvider; import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; -import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.BinarySoftClassification; -import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.PrecisionMetric; -import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.RecallMetric; import org.elasticsearch.client.ml.filestructurefinder.FileStructure; import org.elasticsearch.client.ml.job.config.AnalysisConfig; import org.elasticsearch.client.ml.job.config.Detector; @@ -779,13 +777,7 @@ public class MLRequestConvertersTests extends ESTestCase { } public void testEvaluateDataFrame() throws IOException { - EvaluateDataFrameRequest evaluateRequest = - new EvaluateDataFrameRequest( - Arrays.asList(generateRandomStringArray(1, 10, false, false)), - new BinarySoftClassification( - randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10), - PrecisionMetric.at(0.5), RecallMetric.at(0.6, 0.7))); + EvaluateDataFrameRequest evaluateRequest = EvaluateDataFrameRequestTests.createRandom(); Request request = MLRequestConverters.evaluateDataFrame(evaluateRequest); assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals("/_ml/data_frame/_evaluate", request.getEndpoint()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index e820baff760..dd374dc5256 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -149,6 +149,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.junit.After; @@ -1455,7 +1456,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { public void testStopDataFrameAnalyticsConfig() throws Exception { String sourceIndex = "stop-test-source-index"; String destIndex = "stop-test-dest-index"; - createIndex(sourceIndex, mappingForClassification()); + createIndex(sourceIndex, defaultMappingForTest()); highLevelClient().index(new IndexRequest(sourceIndex).source(XContentType.JSON, "total", 10000) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); @@ -1553,27 +1554,28 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertThat(exception.status().getStatus(), equalTo(404)); } - public void testEvaluateDataFrame() throws IOException { + public void testEvaluateDataFrame_BinarySoftClassification() throws IOException { String indexName = "evaluate-test-index"; 
createIndex(indexName, mappingForClassification()); BulkRequest bulk = new BulkRequest() .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .add(docForClassification(indexName, false, 0.1)) // #0 - .add(docForClassification(indexName, false, 0.2)) // #1 - .add(docForClassification(indexName, false, 0.3)) // #2 - .add(docForClassification(indexName, false, 0.4)) // #3 - .add(docForClassification(indexName, false, 0.7)) // #4 - .add(docForClassification(indexName, true, 0.2)) // #5 - .add(docForClassification(indexName, true, 0.3)) // #6 - .add(docForClassification(indexName, true, 0.4)) // #7 - .add(docForClassification(indexName, true, 0.8)) // #8 - .add(docForClassification(indexName, true, 0.9)); // #9 + .add(docForClassification(indexName, "blue", false, 0.1)) // #0 + .add(docForClassification(indexName, "blue", false, 0.2)) // #1 + .add(docForClassification(indexName, "blue", false, 0.3)) // #2 + .add(docForClassification(indexName, "blue", false, 0.4)) // #3 + .add(docForClassification(indexName, "blue", false, 0.7)) // #4 + .add(docForClassification(indexName, "blue", true, 0.2)) // #5 + .add(docForClassification(indexName, "green", true, 0.3)) // #6 + .add(docForClassification(indexName, "green", true, 0.4)) // #7 + .add(docForClassification(indexName, "green", true, 0.8)) // #8 + .add(docForClassification(indexName, "green", true, 0.9)); // #9 highLevelClient().bulk(bulk, RequestOptions.DEFAULT); MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); EvaluateDataFrameRequest evaluateDataFrameRequest = new EvaluateDataFrameRequest( indexName, + null, new BinarySoftClassification( actualField, probabilityField, @@ -1624,7 +1626,48 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertThat(curvePointAtThreshold1.getTruePositiveRate(), equalTo(0.0)); assertThat(curvePointAtThreshold1.getFalsePositiveRate(), equalTo(0.0)); assertThat(curvePointAtThreshold1.getThreshold(), equalTo(1.0)); + } + public void testEvaluateDataFrame_BinarySoftClassification_WithQuery() throws IOException { + String indexName = "evaluate-with-query-test-index"; + createIndex(indexName, mappingForClassification()); + BulkRequest bulk = new BulkRequest() + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .add(docForClassification(indexName, "blue", true, 1.0)) // #0 + .add(docForClassification(indexName, "blue", true, 1.0)) // #1 + .add(docForClassification(indexName, "blue", true, 1.0)) // #2 + .add(docForClassification(indexName, "blue", true, 1.0)) // #3 + .add(docForClassification(indexName, "blue", true, 0.0)) // #4 + .add(docForClassification(indexName, "blue", true, 0.0)) // #5 + .add(docForClassification(indexName, "green", true, 0.0)) // #6 + .add(docForClassification(indexName, "green", true, 0.0)) // #7 + .add(docForClassification(indexName, "green", true, 0.0)) // #8 + .add(docForClassification(indexName, "green", true, 1.0)); // #9 + highLevelClient().bulk(bulk, RequestOptions.DEFAULT); + + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + EvaluateDataFrameRequest evaluateDataFrameRequest = + new EvaluateDataFrameRequest( + indexName, + // Request only "blue" subset to be evaluated + new QueryConfig(QueryBuilders.termQuery(datasetField, "blue")), + new BinarySoftClassification(actualField, probabilityField, ConfusionMatrixMetric.at(0.5))); + + EvaluateDataFrameResponse evaluateDataFrameResponse = + execute(evaluateDataFrameRequest, machineLearningClient::evaluateDataFrame, 
machineLearningClient::evaluateDataFrameAsync); + assertThat(evaluateDataFrameResponse.getEvaluationName(), equalTo(BinarySoftClassification.NAME)); + assertThat(evaluateDataFrameResponse.getMetrics().size(), equalTo(1)); + + ConfusionMatrixMetric.Result confusionMatrixResult = evaluateDataFrameResponse.getMetricByName(ConfusionMatrixMetric.NAME); + assertThat(confusionMatrixResult.getMetricName(), equalTo(ConfusionMatrixMetric.NAME)); + ConfusionMatrixMetric.ConfusionMatrix confusionMatrix = confusionMatrixResult.getScoreByThreshold("0.5"); + assertThat(confusionMatrix.getTruePositives(), equalTo(4L)); // docs #0, #1, #2 and #3 + assertThat(confusionMatrix.getFalsePositives(), equalTo(0L)); + assertThat(confusionMatrix.getTrueNegatives(), equalTo(0L)); + assertThat(confusionMatrix.getFalseNegatives(), equalTo(2L)); // docs #4 and #5 + } + + public void testEvaluateDataFrame_Regression() throws IOException { String regressionIndex = "evaluate-regression-test-index"; createIndex(regressionIndex, mappingForRegression()); BulkRequest regressionBulk = new BulkRequest() @@ -1641,10 +1684,14 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { .add(docForRegression(regressionIndex, 0.5, 0.9)); // #9 highLevelClient().bulk(regressionBulk, RequestOptions.DEFAULT); - evaluateDataFrameRequest = new EvaluateDataFrameRequest(regressionIndex, - new Regression(actualRegression, probabilityRegression, new MeanSquaredErrorMetric(), new RSquaredMetric())); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + EvaluateDataFrameRequest evaluateDataFrameRequest = + new EvaluateDataFrameRequest( + regressionIndex, + null, + new Regression(actualRegression, probabilityRegression, new MeanSquaredErrorMetric(), new RSquaredMetric())); - evaluateDataFrameResponse = + EvaluateDataFrameResponse evaluateDataFrameResponse = execute(evaluateDataFrameRequest, machineLearningClient::evaluateDataFrame, machineLearningClient::evaluateDataFrameAsync); assertThat(evaluateDataFrameResponse.getEvaluationName(), equalTo(Regression.NAME)); assertThat(evaluateDataFrameResponse.getMetrics().size(), equalTo(2)); @@ -1671,12 +1718,16 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { .endObject(); } + private static final String datasetField = "dataset"; private static final String actualField = "label"; private static final String probabilityField = "p"; private static XContentBuilder mappingForClassification() throws IOException { return XContentFactory.jsonBuilder().startObject() .startObject("properties") + .startObject(datasetField) + .field("type", "keyword") + .endObject() .startObject(actualField) .field("type", "keyword") .endObject() @@ -1687,10 +1738,10 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { .endObject(); } - private static IndexRequest docForClassification(String indexName, boolean isTrue, double p) { + private static IndexRequest docForClassification(String indexName, String dataset, boolean isTrue, double p) { return new IndexRequest() .index(indexName) - .source(XContentType.JSON, actualField, Boolean.toString(isTrue), probabilityField, p); + .source(XContentType.JSON, datasetField, dataset, actualField, Boolean.toString(isTrue), probabilityField, p); } private static final String actualRegression = "regression_actual"; @@ -1725,7 +1776,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { BulkRequest bulk1 = new BulkRequest() .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for 
(int i = 0; i < 10; ++i) { - bulk1.add(docForClassification(indexName, randomBoolean(), randomDoubleBetween(0.0, 1.0, true))); + bulk1.add(docForClassification(indexName, randomAlphaOfLength(10), randomBoolean(), randomDoubleBetween(0.0, 1.0, true))); } highLevelClient().bulk(bulk1, RequestOptions.DEFAULT); @@ -1751,7 +1802,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { BulkRequest bulk2 = new BulkRequest() .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int i = 10; i < 100; ++i) { - bulk2.add(docForClassification(indexName, randomBoolean(), randomDoubleBetween(0.0, 1.0, true))); + bulk2.add(docForClassification(indexName, randomAlphaOfLength(10), randomBoolean(), randomDoubleBetween(0.0, 1.0, true))); } highLevelClient().bulk(bulk2, RequestOptions.DEFAULT); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 00d905aa140..171a0cae9da 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -43,6 +43,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.ScriptQueryBuilder; @@ -81,6 +82,7 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.elasticsearch.xpack.core.index.query.PinnedQueryBuilder; import org.hamcrest.Matchers; import org.junit.Before; @@ -92,7 +94,10 @@ import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; @@ -1373,7 +1378,19 @@ public class SearchIT extends ESRestHighLevelClientTestCase { assertCountHeader(countResponse); assertEquals(3, countResponse.getCount()); } - + + public void testSearchWithBasicLicensedQuery() throws IOException { + SearchRequest searchRequest = new SearchRequest("index"); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + PinnedQueryBuilder pinnedQuery = new PinnedQueryBuilder(new MatchAllQueryBuilder(), "2", "1"); + searchSourceBuilder.query(pinnedQuery); + searchRequest.source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + assertSearchHeader(searchResponse); + assertFirstHit(searchResponse, hasId("2")); + assertSecondHit(searchResponse, hasId("1")); + } + private static void assertCountHeader(CountResponse countResponse) { assertEquals(0, 
countResponse.getSkippedShards()); assertEquals(0, countResponse.getFailedShards()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index 5c30de5c057..4b7d1fb36d2 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -20,6 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; @@ -135,6 +137,17 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase { assertThat(response.getNodes().size(), equalTo(1)); } + public void testCleanupRepository() throws IOException { + AcknowledgedResponse putRepositoryResponse = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}"); + assertTrue(putRepositoryResponse.isAcknowledged()); + + CleanupRepositoryRequest request = new CleanupRepositoryRequest("test"); + CleanupRepositoryResponse response = execute(request, highLevelClient().snapshot()::cleanupRepository, + highLevelClient().snapshot()::cleanupRepositoryAsync); + assertThat(response.result().bytes(), equalTo(0L)); + assertThat(response.result().blobs(), equalTo(0L)); + } + public void testCreateSnapshot() throws IOException { String repository = "test_repository"; assertTrue(createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 434ba1a1b20..a5bf1c229cb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -48,6 +48,7 @@ import org.elasticsearch.client.ml.DeleteForecastRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.DeleteJobResponse; import org.elasticsearch.client.ml.DeleteModelSnapshotRequest; +import org.elasticsearch.client.ml.EstimateMemoryUsageResponse; import org.elasticsearch.client.ml.EvaluateDataFrameRequest; import org.elasticsearch.client.ml.EvaluateDataFrameResponse; import org.elasticsearch.client.ml.FindFileStructureRequest; @@ -177,7 +178,6 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.tasks.TaskId; -import org.hamcrest.CoreMatchers; import org.junit.After; import java.io.IOException; @@ -194,11 +194,13 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsInAnyOrder; 
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.core.Is.is; public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { @@ -3175,16 +3177,16 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { BulkRequest bulkRequest = new BulkRequest(indexName) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) - .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.1)) // #0 - .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.2)) // #1 - .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.3)) // #2 - .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.4)) // #3 - .add(new IndexRequest().source(XContentType.JSON, "label", false, "p", 0.7)) // #4 - .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.2)) // #5 - .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.3)) // #6 - .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.4)) // #7 - .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.8)) // #8 - .add(new IndexRequest().source(XContentType.JSON, "label", true, "p", 0.9)); // #9 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.1)) // #0 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.2)) // #1 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.3)) // #2 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.4)) // #3 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", false, "p", 0.7)) // #4 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.2)) // #5 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.3)) // #6 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.4)) // #7 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.8)) // #8 + .add(new IndexRequest().source(XContentType.JSON, "dataset", "blue", "label", true, "p", 0.9)); // #9 RestHighLevelClient client = highLevelClient(); client.indices().create(createIndexRequest, RequestOptions.DEFAULT); client.bulk(bulkRequest, RequestOptions.DEFAULT); @@ -3192,14 +3194,15 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { // tag::evaluate-data-frame-request EvaluateDataFrameRequest request = new EvaluateDataFrameRequest( // <1> indexName, // <2> - new BinarySoftClassification( // <3> - "label", // <4> - "p", // <5> - // Evaluation metrics // <6> - PrecisionMetric.at(0.4, 0.5, 0.6), // <7> - RecallMetric.at(0.5, 0.7), // <8> - ConfusionMatrixMetric.at(0.5), // <9> - AucRocMetric.withCurve())); // <10> + new QueryConfig(QueryBuilders.termQuery("dataset", "blue")), // <3> + new BinarySoftClassification( // <4> + "label", // <5> + "p", // <6> + // Evaluation metrics // <7> + PrecisionMetric.at(0.4, 0.5, 0.6), // <8> + RecallMetric.at(0.5, 0.7), // <9> + ConfusionMatrixMetric.at(0.5), // <10> + AucRocMetric.withCurve())); // <11> // end::evaluate-data-frame-request // tag::evaluate-data-frame-execute @@ -3220,14 +3223,15 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { 
metrics.stream().map(m -> m.getMetricName()).collect(Collectors.toList()), containsInAnyOrder(PrecisionMetric.NAME, RecallMetric.NAME, ConfusionMatrixMetric.NAME, AucRocMetric.NAME)); assertThat(precision, closeTo(0.6, 1e-9)); - assertThat(confusionMatrix.getTruePositives(), CoreMatchers.equalTo(2L)); // docs #8 and #9 - assertThat(confusionMatrix.getFalsePositives(), CoreMatchers.equalTo(1L)); // doc #4 - assertThat(confusionMatrix.getTrueNegatives(), CoreMatchers.equalTo(4L)); // docs #0, #1, #2 and #3 - assertThat(confusionMatrix.getFalseNegatives(), CoreMatchers.equalTo(3L)); // docs #5, #6 and #7 + assertThat(confusionMatrix.getTruePositives(), equalTo(2L)); // docs #8 and #9 + assertThat(confusionMatrix.getFalsePositives(), equalTo(1L)); // doc #4 + assertThat(confusionMatrix.getTrueNegatives(), equalTo(4L)); // docs #0, #1, #2 and #3 + assertThat(confusionMatrix.getFalseNegatives(), equalTo(3L)); // docs #5, #6 and #7 } { EvaluateDataFrameRequest request = new EvaluateDataFrameRequest( indexName, + new QueryConfig(QueryBuilders.termQuery("dataset", "blue")), new BinarySoftClassification( "label", "p", @@ -3262,6 +3266,72 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testEstimateMemoryUsage() throws Exception { + createIndex("estimate-test-source-index"); + BulkRequest bulkRequest = + new BulkRequest("estimate-test-source-index") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 10; ++i) { + bulkRequest.add(new IndexRequest().source(XContentType.JSON, "timestamp", 123456789L, "total", 10L)); + } + RestHighLevelClient client = highLevelClient(); + client.bulk(bulkRequest, RequestOptions.DEFAULT); + { + // tag::estimate-memory-usage-request + DataFrameAnalyticsConfig config = DataFrameAnalyticsConfig.builder() + .setSource(DataFrameAnalyticsSource.builder().setIndex("estimate-test-source-index").build()) + .setAnalysis(OutlierDetection.createDefault()) + .build(); + PutDataFrameAnalyticsRequest request = new PutDataFrameAnalyticsRequest(config); // <1> + // end::estimate-memory-usage-request + + // tag::estimate-memory-usage-execute + EstimateMemoryUsageResponse response = client.machineLearning().estimateMemoryUsage(request, RequestOptions.DEFAULT); + // end::estimate-memory-usage-execute + + // tag::estimate-memory-usage-response + ByteSizeValue expectedMemoryWithoutDisk = response.getExpectedMemoryWithoutDisk(); // <1> + ByteSizeValue expectedMemoryWithDisk = response.getExpectedMemoryWithDisk(); // <2> + // end::estimate-memory-usage-response + + // We are pretty liberal here as this test does not aim at verifying concrete numbers but rather end-to-end user workflow. 
+ ByteSizeValue lowerBound = new ByteSizeValue(1, ByteSizeUnit.KB); + ByteSizeValue upperBound = new ByteSizeValue(1, ByteSizeUnit.GB); + assertThat(expectedMemoryWithoutDisk, allOf(greaterThan(lowerBound), lessThan(upperBound))); + assertThat(expectedMemoryWithDisk, allOf(greaterThan(lowerBound), lessThan(upperBound))); + } + { + DataFrameAnalyticsConfig config = DataFrameAnalyticsConfig.builder() + .setSource(DataFrameAnalyticsSource.builder().setIndex("estimate-test-source-index").build()) + .setAnalysis(OutlierDetection.createDefault()) + .build(); + PutDataFrameAnalyticsRequest request = new PutDataFrameAnalyticsRequest(config); + // tag::estimate-memory-usage-execute-listener + ActionListener<EstimateMemoryUsageResponse> listener = new ActionListener<EstimateMemoryUsageResponse>() { + @Override + public void onResponse(EstimateMemoryUsageResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::estimate-memory-usage-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::estimate-memory-usage-execute-async + client.machineLearning().estimateMemoryUsageAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::estimate-memory-usage-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testCreateFilter() throws Exception { RestHighLevelClient client = highLevelClient(); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EvaluateDataFrameRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EvaluateDataFrameRequestTests.java new file mode 100644 index 00000000000..16496875a03 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/EvaluateDataFrameRequestTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.dataframe.QueryConfig; +import org.elasticsearch.client.ml.dataframe.evaluation.Evaluation; +import org.elasticsearch.client.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.client.ml.dataframe.evaluation.regression.RegressionTests; +import org.elasticsearch.client.ml.dataframe.evaluation.softclassification.BinarySoftClassificationTests; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; + +public class EvaluateDataFrameRequestTests extends AbstractXContentTestCase { + + public static EvaluateDataFrameRequest createRandom() { + int indicesCount = randomIntBetween(1, 5); + List indices = new ArrayList<>(indicesCount); + for (int i = 0; i < indicesCount; i++) { + indices.add(randomAlphaOfLength(10)); + } + QueryConfig queryConfig = randomBoolean() + ? new QueryConfig(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))) + : null; + Evaluation evaluation = randomBoolean() ? BinarySoftClassificationTests.createRandom() : RegressionTests.createRandom(); + return new EvaluateDataFrameRequest(indices, queryConfig, evaluation); + } + + @Override + protected EvaluateDataFrameRequest createTestInstance() { + return createRandom(); + } + + @Override + protected EvaluateDataFrameRequest doParseInstance(XContentParser parser) throws IOException { + return EvaluateDataFrameRequest.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + // allow unknown fields in root only + return field -> !field.isEmpty(); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List namedXContent = new ArrayList<>(); + namedXContent.addAll(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents()); + namedXContent.addAll(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + return new NamedXContentRegistry(namedXContent); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RegressionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RegressionTests.java index 89e4823b93e..5d2a614663d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RegressionTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/regression/RegressionTests.java @@ -36,8 +36,7 @@ public class RegressionTests extends AbstractXContentTestCase { return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); } - @Override - protected Regression createTestInstance() { + public static Regression createRandom() { List metrics = new ArrayList<>(); if (randomBoolean()) { metrics.add(new MeanSquaredErrorMetric()); @@ -50,6 +49,11 @@ public class RegressionTests extends AbstractXContentTestCase { new Regression(randomAlphaOfLength(10), randomAlphaOfLength(10), 
metrics.isEmpty() ? null : metrics); } + @Override + protected Regression createTestInstance() { + return createRandom(); + } + @Override protected Regression doParseInstance(XContentParser parser) throws IOException { return Regression.fromXContent(parser); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java index 2fb8a21e3a1..7fd9af2ab88 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java @@ -37,8 +37,7 @@ public class BinarySoftClassificationTests extends AbstractXContentTestCase<BinarySoftClassification> { - @Override - protected BinarySoftClassification createTestInstance() { + public static BinarySoftClassification createRandom() { List<EvaluationMetric> metrics = new ArrayList<>(); if (randomBoolean()) { metrics.add(new AucRocMetric(randomBoolean())); @@ -66,6 +65,11 @@ public class BinarySoftClassificationTests extends AbstractXContentTestCase<BinarySoftClassification> { + @Override + protected BinarySoftClassification createTestInstance() { + return createRandom(); + } + @Override protected BinarySoftClassification doParseInstance(XContentParser parser) throws IOException { return BinarySoftClassification.fromXContent(parser); diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java assertThat(terminal.getOutput(), containsString("-> Downloading")); // No progress bar in batch mode assertThat(terminal.getOutput(), not(containsString("100%"))); @@ -1225,7 +1225,7 @@ public class InstallPluginCommandTests extends ESTestCase { UserException e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertEquals("installation aborted by user", e.getMessage()); - assertThat(terminal.getOutput(), containsString("WARNING: " + warning)); + assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) { assertThat(fileStream.collect(Collectors.toList()), empty()); } @@ -1238,7 +1238,7 @@ public class InstallPluginCommandTests extends ESTestCase { terminal.addTextInput("n"); e = expectThrows(UserException.class, () -> installPlugin(pluginZip, env.v1())); assertEquals("installation aborted by user", e.getMessage()); - assertThat(terminal.getOutput(), containsString("WARNING: " + warning)); + assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); try (Stream<Path> fileStream = Files.list(env.v2().pluginsFile())) { assertThat(fileStream.collect(Collectors.toList()), empty()); } @@ -1251,7 +1251,7 @@ } installPlugin(pluginZip, env.v1()); for (String warning : warnings) { - assertThat(terminal.getOutput(), containsString("WARNING: " + warning)); + assertThat(terminal.getErrorOutput(), containsString("WARNING: " + warning)); } } diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java index 8144c5f0600..bb839008d91 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -247,8 +247,11 @@ public class ListPluginsCommandTests extends ESTestCase { MockTerminal terminal = listPlugins(home); String message = "plugin [fake_plugin1] was built for Elasticsearch version 1.0 but version " + Version.CURRENT + " is required"; assertEquals( - "fake_plugin1\n" + "WARNING: " + message + "\n" + "fake_plugin2\n", - terminal.getOutput()); + "fake_plugin1\nfake_plugin2\n", + terminal.getOutput()); + assertEquals(
"WARNING: " + message + "\n", + terminal.getErrorOutput()); String[] params = {"-s"}; terminal = listPlugins(home, params); diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index c62d37a4e28..40f17196472 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -237,11 +237,14 @@ public class RemovePluginCommandTests extends ESTestCase { return false; } }.main(new String[] { "-Epath.home=" + home, "fake" }, terminal); - try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput()))) { + try (BufferedReader reader = new BufferedReader(new StringReader(terminal.getOutput())); + BufferedReader errorReader = new BufferedReader(new StringReader(terminal.getErrorOutput())) + ) { assertEquals("-> removing [fake]...", reader.readLine()); assertEquals("ERROR: plugin [fake] not found; run 'elasticsearch-plugin list' to get list of installed plugins", - reader.readLine()); + errorReader.readLine()); assertNull(reader.readLine()); + assertNull(errorReader.readLine()); } } diff --git a/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc b/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc new file mode 100644 index 00000000000..659e7e11755 --- /dev/null +++ b/docs/java-rest/high-level/ml/estimate-memory-usage.asciidoc @@ -0,0 +1,35 @@ +-- +:api: estimate-memory-usage +:request: PutDataFrameAnalyticsRequest +:response: EstimateMemoryUsageResponse +-- +[id="{upid}-{api}"] +=== Estimate memory usage API + +The Estimate memory usage API is used to estimate memory usage of {dfanalytics}. +Estimation results can be used when deciding the appropriate value for `model_memory_limit` setting later on. + +The API accepts an +{request}+ object and returns an +{response}+. + +[id="{upid}-{api}-request"] +==== Estimate memory usage Request + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Constructing a new request containing a {dataframe-analytics-config} for which memory usage estimation should be performed + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains the memory usage estimates. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Estimated memory usage under the assumption that the whole {dfanalytics} should happen in memory (i.e. without overflowing to disk). +<2> Estimated memory usage under the assumption that overflowing to disk is allowed during {dfanalytics}. 
\ No newline at end of file diff --git a/docs/java-rest/high-level/ml/evaluate-data-frame.asciidoc b/docs/java-rest/high-level/ml/evaluate-data-frame.asciidoc index 660603d2e38..1fe4cc7af01 100644 --- a/docs/java-rest/high-level/ml/evaluate-data-frame.asciidoc +++ b/docs/java-rest/high-level/ml/evaluate-data-frame.asciidoc @@ -18,14 +18,15 @@ include-tagged::{doc-tests-file}[{api}-request] -------------------------------------------------- <1> Constructing a new evaluation request <2> Reference to an existing index -<3> Kind of evaluation to perform -<4> Name of the field in the index. Its value denotes the actual (i.e. ground truth) label for an example. Must be either true or false -<5> Name of the field in the index. Its value denotes the probability (as per some ML algorithm) of the example being classified as positive -<6> The remaining parameters are the metrics to be calculated based on the two fields described above. -<7> https://en.wikipedia.org/wiki/Precision_and_recall[Precision] calculated at thresholds: 0.4, 0.5 and 0.6 -<8> https://en.wikipedia.org/wiki/Precision_and_recall[Recall] calculated at thresholds: 0.5 and 0.7 -<9> https://en.wikipedia.org/wiki/Confusion_matrix[Confusion matrix] calculated at threshold 0.5 -<10> https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve[AuC ROC] calculated and the curve points returned +<3> The query with which to select data from indices +<4> Kind of evaluation to perform +<5> Name of the field in the index. Its value denotes the actual (i.e. ground truth) label for an example. Must be either true or false +<6> Name of the field in the index. Its value denotes the probability (as per some ML algorithm) of the example being classified as positive +<7> The remaining parameters are the metrics to be calculated based on the two fields described above. 
+<8> https://en.wikipedia.org/wiki/Precision_and_recall[Precision] calculated at thresholds: 0.4, 0.5 and 0.6 +<9> https://en.wikipedia.org/wiki/Precision_and_recall[Recall] calculated at thresholds: 0.5 and 0.7 +<10> https://en.wikipedia.org/wiki/Confusion_matrix[Confusion matrix] calculated at threshold 0.5 +<11> https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve[AuC ROC] calculated and the curve points returned include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/query-builders.asciidoc b/docs/java-rest/high-level/query-builders.asciidoc index 9a3d8d16224..a2706e7ad8a 100644 --- a/docs/java-rest/high-level/query-builders.asciidoc +++ b/docs/java-rest/high-level/query-builders.asciidoc @@ -85,6 +85,7 @@ This page lists all the available search queries with their corresponding `Query | {ref}/query-dsl-percolate-query.html[Percolate] | {percolate-ref}/PercolateQueryBuilder.html[PercolateQueryBuilder] | | {ref}/query-dsl-wrapper-query.html[Wrapper] | {query-ref}/WrapperQueryBuilder.html[WrapperQueryBuilder] | {query-ref}/QueryBuilders.html#wrapperQuery-java.lang.String-[QueryBuilders.wrapperQuery()] | {ref}/query-dsl-rank-feature-query.html[Rank Feature] | {mapper-extras-ref}/RankFeatureQuery.html[RankFeatureQueryBuilder] | +| {ref}/query-dsl-pinned-query.html[Pinned Query] | The PinnedQueryBuilder is packaged as part of the xpack-core module | |====== ==== Span queries diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 06c1dca33fa..42e28dd4785 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -295,6 +295,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-start-data-frame-analytics>> * <<{upid}-stop-data-frame-analytics>> * <<{upid}-evaluate-data-frame>> +* <<{upid}-estimate-memory-usage>> * <<{upid}-put-filter>> * <<{upid}-get-filters>> * <<{upid}-update-filter>> @@ -346,6 +347,7 @@ include::ml/delete-data-frame-analytics.asciidoc[] include::ml/start-data-frame-analytics.asciidoc[] include::ml/stop-data-frame-analytics.asciidoc[] include::ml/evaluate-data-frame.asciidoc[] +include::ml/estimate-memory-usage.asciidoc[] include::ml/put-filter.asciidoc[] include::ml/get-filters.asciidoc[] include::ml/update-filter.asciidoc[] diff --git a/docs/plugins/analysis-kuromoji.asciidoc b/docs/plugins/analysis-kuromoji.asciidoc index 383df5afb48..8ccea28beda 100644 --- a/docs/plugins/analysis-kuromoji.asciidoc +++ b/docs/plugins/analysis-kuromoji.asciidoc @@ -98,6 +98,39 @@ dictionary to `$ES_HOME/config/userdict_ja.txt`: 東京スカイツリー,東京 スカイツリー,トウキョウ スカイツリー,カスタム名詞 ----------------------- +-- + +You can also inline the rules directly in the tokenizer definition using +the `user_dictionary_rules` option: + +[source,js] +-------------------------------------------------- +PUT nori_sample +{ + "settings": { + "index": { + "analysis": { + "tokenizer": { + "kuromoji_user_dict": { + "type": "kuromoji_tokenizer", + "mode": "extended", + "user_dictionary_rules": ["東京スカイツリー,東京 スカイツリー,トウキョウ スカイツリー,カスタム名詞"] + } + }, + "analyzer": { + "my_analyzer": { + "type": "custom", + "tokenizer": "kuromoji_user_dict" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +-- + `nbest_cost`/`nbest_examples`:: + -- diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index fdc053058d3..6a75df02c5f 100644 --- 
a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -21,33 +21,11 @@ ability to "exclude" (`-`), for example: `test*,-test3`. All multi index APIs support the following url query string parameters: -[horizontal] -`ignore_unavailable`:: +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] -Controls whether to ignore if any specified indices are unavailable, -including indices that don't exist or closed indices. Either `true` or `false` -can be specified. +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] -`allow_no_indices`:: - -Controls whether to fail if a wildcard indices expression results in no -concrete indices. Either `true` or `false` can be specified. For example if -the wildcard expression `foo*` is specified and no indices are available that -start with `foo`, then depending on this setting the request will fail. This -setting is also applicable when `_all`, `*`, or no index has been specified. This -settings also applies for aliases, in case an alias points to a closed index. - -`expand_wildcards`:: - -Controls what kind of concrete indices that wildcard indices expressions can expand -to. If `open` is specified then the wildcard expression is expanded to only -open indices. If `closed` is specified then the wildcard expression is -expanded only to closed indices. Also both values (`open,closed`) can be -specified to expand to all indices. -+ -If `none` is specified then wildcard expansion will be disabled. If `all` -is specified, wildcard expressions will expand to all indices (this is equivalent -to specifying `open,closed`). +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] The defaults settings for the above parameters depend on the API being used. diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index 619374504c0..dc385a91e03 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -79,11 +79,11 @@ The API returns the following response: [source,txt] -------------------------------------------------- -alias index filter routing.index routing.search -alias1 test1 - - - -alias2 test1 * - - -alias3 test1 - 1 1 -alias4 test1 - 2 1,2 +alias index filter routing.index routing.search is_write_index +alias1 test1 - - - - +alias2 test1 * - - - +alias3 test1 - 1 1 - +alias4 test1 - 2 1,2 - -------------------------------------------------- // TESTRESPONSE[s/[*]/[*]/ non_json] diff --git a/docs/reference/docs/delete.asciidoc b/docs/reference/docs/delete.asciidoc index 3c30f8a5198..97a7f658966 100644 --- a/docs/reference/docs/delete.asciidoc +++ b/docs/reference/docs/delete.asciidoc @@ -1,53 +1,35 @@ [[docs-delete]] === Delete API +++++ +Delete +++++ -The delete API allows to delete a JSON document from a specific -index based on its id. The following example deletes the JSON document -from an index called `twitter` with ID `1`: +Removes a JSON document from the specified index. 
-[source,js]
--------------------------------------------------
-DELETE /twitter/_doc/1
--------------------------------------------------
-// CONSOLE
-// TEST[setup:twitter]
+[[docs-delete-api-request]]
+==== {api-request-title}
-The result of the above delete operation is:
+`DELETE /<index>/_doc/<_id>`
-[source,js]
--------------------------------------------------
-{
-  "_shards" : {
-    "total" : 2,
-    "failed" : 0,
-    "successful" : 2
-  },
-  "_index" : "twitter",
-  "_type" : "_doc",
-  "_id" : "1",
-  "_version" : 2,
-  "_primary_term": 1,
-  "_seq_no": 5,
-  "result": "deleted"
-}
--------------------------------------------------
-// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
-// TESTRESPONSE[s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
-// TESTRESPONSE[s/"_seq_no" : 5/"_seq_no" : $body._seq_no/]
+[[docs-delete-api-desc]]
+==== {api-description-title}
+
+You use DELETE to remove a document from an index. You must specify the
+index name and document ID.
[float]
[[optimistic-concurrency-control-delete]]
-==== Optimistic concurrency control
+===== Optimistic concurrency control
Delete operations can be made conditional and only be performed if the last
-modification to the document was assigned the sequence number and primary
+modification to the document was assigned the sequence number and primary term
specified by the `if_seq_no` and `if_primary_term` parameters. If a
mismatch is detected, the operation will result in a `VersionConflictException`
-and a status code of 409. See <> for more details.
+and a status code of 409. See <> for more details.
[float]
[[delete-versioning]]
-==== Versioning
+===== Versioning
Each document indexed is versioned. When deleting a document, the `version`
can be specified to make sure the relevant document we are trying to delete is
@@ -60,11 +42,17 @@ determined by the `index.gc_deletes` index setting and defaults to 60 seconds.
[float]
[[delete-routing]]
-==== Routing
+===== Routing
+
+If routing is used during indexing, the routing value also needs to be
+specified to delete a document.
+
+If the `_routing` mapping is set to `required` and no routing value is
+specified, the delete API throws a `RoutingMissingException` and rejects
+the request.
+
+For example:
-When indexing using the ability to control the routing, in order to
-delete a document, the routing value should also be provided. For
-example:
////
Example to delete with routing
@@ -87,26 +75,21 @@ DELETE /twitter/_doc/1?routing=kimchy
// CONSOLE
// TEST[continued]
-The above will delete a tweet with id `1`, but will be routed based on the
-user. Note that issuing a delete without the correct routing will cause the
-document to not be deleted.
-
-When the `_routing` mapping is set as `required` and no routing value is
-specified, the delete API will throw a `RoutingMissingException` and reject
-the request.
+This request deletes the tweet with id `1`, but it is routed based on the
+user. The document is not deleted if the correct routing is not specified.
[float]
[[delete-index-creation]]
-==== Automatic index creation
+===== Automatic index creation
If an <> is used,
-the delete operation automatically creates an index if it has not been
-created before (check out the <>
-for manually creating an index).
+the delete operation automatically creates the specified index if it does not
+exist. For information about manually creating indices, see
+<>.
[float]
[[delete-distributed]]
-==== Distributed
+===== Distributed
The delete operation gets hashed into a specific shard id.
It then gets
redirected into the primary shard within that id group, and replicated
@@ -114,7 +97,7 @@ redirected into the primary shard within that id group, and replicated
[float]
[[delete-wait-for-active-shards]]
-==== Wait For Active Shards
+===== Wait for active shards
When making delete requests, you can set the `wait_for_active_shards`
parameter to require a minimum number of shard copies to be active
@@ -124,15 +107,14 @@ example.
[float]
[[delete-refresh]]
-==== Refresh
+===== Refresh
Control when the changes made by this request are visible to search. See
<>.
-
[float]
[[delete-timeout]]
-==== Timeout
+===== Timeout
The primary shard assigned to perform the delete operation might not be
available when the delete operation is executed. Some reasons for this
@@ -149,3 +131,68 @@ DELETE /twitter/_doc/1?timeout=5m
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
+
+[[docs-delete-api-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Required, string) Name of the target index.
+
+`<_id>`::
+(Required, string) Unique identifier for the document.
+
+[[docs-delete-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-pipeline]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeout]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards]
+
+[[docs-delete-api-example]]
+==== {api-examples-title}
+
+Delete the JSON document `1` from the `twitter` index:
+
+[source,js]
+--------------------------------------------------
+DELETE /twitter/_doc/1
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+  "_shards" : {
+    "total" : 2,
+    "failed" : 0,
+    "successful" : 2
+  },
+  "_index" : "twitter",
+  "_type" : "_doc",
+  "_id" : "1",
+  "_version" : 2,
+  "_primary_term": 1,
+  "_seq_no": 5,
+  "result": "deleted"
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
+// TESTRESPONSE[s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
+// TESTRESPONSE[s/"_seq_no" : 5/"_seq_no" : $body._seq_no/]
diff --git a/docs/reference/docs/get.asciidoc b/docs/reference/docs/get.asciidoc
index 3e9f1dc053e..14f37770fa5 100644
--- a/docs/reference/docs/get.asciidoc
+++ b/docs/reference/docs/get.asciidoc
@@ -1,9 +1,235 @@
[[docs-get]]
=== Get API
+++++
+Get
+++++
-The get API allows to get a JSON document from the index based on
-its id. The following example gets a JSON document from an index called
-twitter with id valued 0:
+Retrieves the specified JSON document from an index.
+
+[[docs-get-api-request]]
+==== {api-request-title}
+
+`GET /<index>/_doc/<_id>`
+
+`HEAD /<index>/_doc/<_id>`
+
+`GET /<index>/_source/<_id>`
+
+`HEAD /<index>/_source/<_id>`
+
+[[docs-get-api-desc]]
+==== {api-description-title}
+You use GET to retrieve a document and its source or stored fields from a
+particular index. Use HEAD to verify that a document exists. You can
+use the `_source` resource to retrieve just the document source or verify
+that it exists.
+
+[float]
+[[realtime]]
+===== Realtime
+
+By default, the get API is realtime, and is not affected by the refresh
+rate of the index (when data will become visible for search). If a document
+has been updated but is not yet refreshed, the get API will issue a refresh
+call in-place to make the document visible. This will also make other documents
+changed since the last refresh visible. In order to disable realtime GET,
+one can set the `realtime` parameter to `false`.
+
+[float]
+[[get-source-filtering]]
+===== Source filtering
+
+By default, the get operation returns the contents of the `_source` field unless
+you have used the `stored_fields` parameter or if the `_source` field is disabled.
+You can turn off `_source` retrieval by using the `_source` parameter:
+
+[source,js]
+--------------------------------------------------
+GET twitter/_doc/0?_source=false
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+If you only need one or two fields from the `_source`, use the `_source_includes`
+or `_source_excludes` parameters to include or filter out particular fields.
+This can be especially helpful with large documents where partial retrieval can
+save on network overhead. Both parameters take a comma-separated list
+of fields or wildcard expressions. Example:
+
+[source,js]
+--------------------------------------------------
+GET twitter/_doc/0?_source_includes=*.id&_source_excludes=entities
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+If you only want to specify includes, you can use a shorter notation:
+
+[source,js]
+--------------------------------------------------
+GET twitter/_doc/0?_source=*.id,retweeted
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:twitter]
+
+[float]
+[[get-routing]]
+===== Routing
+
+If routing is used during indexing, the routing value also needs to be
+specified to retrieve a document. For example:
+
+[source,js]
+--------------------------------------------------
+GET twitter/_doc/2?routing=user1
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+This request gets the tweet with id `2`, but it is routed based on the
+user. The document is not fetched if the correct routing is not specified.
+
+[float]
+[[preference]]
+===== Preference
+
+Controls a `preference` of which shard replicas to execute the get
+request on. By default, the operation is randomized between the shard
+replicas.
+
+The `preference` can be set to:
+
+`_local`::
+ The operation will prefer to be executed on a local
+ allocated shard if possible.
+
+Custom (string) value::
+ A custom value will be used to guarantee that
+ the same shards will be used for the same custom value. This can help
+ with "jumping values" when hitting different shards in different refresh
+ states. A sample value can be something like the web session id, or the
+ user name.
+
+[float]
+[[get-refresh]]
+===== Refresh
+
+The `refresh` parameter can be set to `true` in order to refresh the
+relevant shard before the get operation and make it searchable. Setting
+it to `true` should be done after careful thought and verification that
+this does not cause a heavy load on the system (and slows down
+indexing).
+
+[float]
+[[get-distributed]]
+===== Distributed
+
+The get operation gets hashed into a specific shard id. It then gets
+redirected to one of the replicas within that shard id and returns the
+result. The replicas are the primary shard and its replicas within that
+shard id group. This means that the more replicas we have, the
+better GET scaling we will have.
+
+[float]
+[[get-versioning]]
+===== Versioning support
+
+You can use the `version` parameter to retrieve the document only if
+its current version is equal to the specified one. This behavior is the same
+for all version types with the exception of version type `FORCE` which always
+retrieves the document. Note that `FORCE` version type is deprecated.
+
+Internally, Elasticsearch has marked the old document as deleted and added an
+entirely new document. The old version of the document doesn’t disappear
+immediately, although you won’t be able to access it. Elasticsearch cleans up
+deleted documents in the background as you continue to index more data.
+
+[[docs-get-api-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Required, string) Name of the index that contains the document.
+
+`<_id>`::
+(Required, string) Unique identifier of the document.
+
+[[docs-get-api-query-params]]
+==== {api-query-parms-title}
+
+`preference`::
+(Optional, string) Specify the node or shard the operation should
+be performed on (default: random).
+
+`realtime`::
+(Optional, boolean) Set to `false` to disable real time GET
+(default: `true`). See <>.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing]
+
+`stored_fields`::
+(Optional, boolean) Set to `true` to retrieve the document fields stored in the
+index rather than the document `_source` (default: `false`).
+
+`_source`::
+(Optional, list) Set to `false` to disable source retrieval (default: `true`).
+You can also specify a comma-separated list of the fields
+you want to retrieve.
+
+`_source_excludes`::
+(Optional, list) Specify the source fields you want to exclude.
+
+`_source_includes`::
+(Optional, list) Specify the source fields you want to retrieve.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type]
+
+[[docs-get-api-response-body]]
+==== {api-response-body-title}
+
+`_index`::
+The name of the index the document belongs to.
+
+`_type`::
+The document type. {es} indices now support a single document type, `_doc`.
+
+`_id`::
+The unique identifier for the document.
+
+`_version`::
+The document version. Incremented each time the document is updated.
+
+`_seq_no`::
+The sequence number assigned to the document for the indexing
+operation. Sequence numbers are used to ensure an older version of a document
+doesn’t overwrite a newer version. See <>.
+
+`_primary_term`::
+The primary term assigned to the document for the indexing operation.
+See <>.
+
+`found`::
+Indicates whether the document exists: `true` or `false`.
+
+`_routing`::
+The explicit routing, if set.
+
+`_source`::
+If `found` is `true`, contains the document data formatted in JSON.
+Excluded if the `_source` parameter is set to `false` or the `stored_fields`
+parameter is set to `true`.
+
+`_fields`::
+If the `stored_fields` parameter is set to `true` and `found` is
+`true`, contains the document fields stored in the index.
+
+[[docs-get-api-example]]
+==== {api-examples-title}
+
+Retrieve the JSON document with the `_id` 0 from the `twitter` index:
[source,js]
--------------------------------------------------
@@ -12,7 +238,7 @@ GET twitter/_doc/0
// CONSOLE
// TEST[setup:twitter]
-The result of the above get operation is:
+The API returns the following result:
[source,js]
--------------------------------------------------
@@ -34,13 +260,7 @@ The result of the above get operation is:
--------------------------------------------------
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/]
-The above result includes the `_index`, `_id`, and `_version`
-of the document we wish to retrieve, including the actual `_source`
-of the document if it could be found (as indicated by the `found`
-field in the response).
-
-The API also allows to check for the existence of a document using
-`HEAD`, for example:
+Check to see if a document with the `_id` 0 exists:
[source,js]
--------------------------------------------------
@@ -49,60 +269,50 @@ HEAD twitter/_doc/0
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
-[float]
-[[realtime]]
-==== Realtime
-
-By default, the get API is realtime, and is not affected by the refresh
-rate of the index (when data will become visible for search). If a document
-has been updated but is not yet refreshed, the get API will issue a refresh
-call in-place to make the document visible. This will also make other documents
-changed since the last refresh visible. In order to disable realtime GET,
-one can set the `realtime` parameter to `false`.
+{es} returns a status code of `200 - OK` if the document exists, or
+`404 - Not Found` if it doesn't.
[float]
-[[get-source-filtering]]
-==== Source filtering
+[[_source]]
+===== Get the source field only
-By default, the get operation returns the contents of the `_source` field unless
-you have used the `stored_fields` parameter or if the `_source` field is disabled.
-You can turn off `_source` retrieval by using the `_source` parameter:
+Use the `/<index>/_source/<_id>` resource to get
+just the `_source` field of a document. For example:
[source,js]
--------------------------------------------------
-GET twitter/_doc/0?_source=false
+GET twitter/_source/1
--------------------------------------------------
// CONSOLE
-// TEST[setup:twitter]
+// TEST[continued]
-If you only need one or two fields from the complete `_source`, you can use the `_source_includes`
-and `_source_excludes` parameters to include or filter out the parts you need. This can be especially helpful
-with large documents where partial retrieval can save on network overhead. Both parameters take a comma separated list
-of fields or wildcard expressions. Example:
+You can use the source filtering parameters to control which parts of the
+`_source` are returned:
[source,js]
--------------------------------------------------
-GET twitter/_doc/0?_source_includes=*.id&_source_excludes=entities
+GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities
--------------------------------------------------
// CONSOLE
-// TEST[setup:twitter]
+// TEST[continued]
-If you only want to specify includes, you can use a shorter notation:
+You can use HEAD with the `_source` endpoint to efficiently
+test whether or not the document `_source` exists. A document's source is not
+available if it is disabled in the <>.
[source,js] -------------------------------------------------- -GET twitter/_doc/0?_source=*.id,retweeted +HEAD twitter/_source/1 -------------------------------------------------- // CONSOLE -// TEST[setup:twitter] +// TEST[continued] [float] [[get-stored-fields]] -==== Stored Fields +===== Get stored fields -The get operation allows specifying a set of stored fields that will be -returned by passing the `stored_fields` parameter. -If the requested fields are not stored, they will be ignored. +Use the `stored_fields` parameter to specify the set of stored fields you want +to retrieve. Any requested fields that are not stored are ignored. Consider for instance the following mapping: [source,js] @@ -147,7 +357,7 @@ GET twitter/_doc/1?stored_fields=tags,counter // CONSOLE // TEST[continued] -The result of the above get operation is: +The API returns the following result: [source,js] -------------------------------------------------- @@ -168,11 +378,10 @@ The result of the above get operation is: -------------------------------------------------- // TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] - Field values fetched from the document itself are always returned as an array. -Since the `counter` field is not stored the get request simply ignores it when trying to get the `stored_fields.` +Since the `counter` field is not stored, the get request ignores it. -It is also possible to retrieve metadata fields like the `_routing` field: +You can also retrieve metadata fields like the `_routing` field: [source,js] -------------------------------------------------- @@ -192,7 +401,7 @@ GET twitter/_doc/2?routing=user1&stored_fields=tags,counter // CONSOLE // TEST[continued] -The result of the above get operation is: +The API returns the following result: [source,js] -------------------------------------------------- @@ -214,113 +423,5 @@ The result of the above get operation is: -------------------------------------------------- // TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ s/"_primary_term" : 1/"_primary_term" : $body._primary_term/] -Also only leaf fields can be returned via the `stored_field` option. So object fields can't be returned and such requests -will fail. - -[float] -[[_source]] -==== Getting the +_source+ directly - -Use the `/{index}/_source/{id}` endpoint to get -just the `_source` field of the document, -without any additional content around it. For example: - -[source,js] --------------------------------------------------- -GET twitter/_source/1 --------------------------------------------------- -// CONSOLE -// TEST[continued] - -You can also use the same source filtering parameters to control which parts of the `_source` will be returned: - -[source,js] --------------------------------------------------- -GET twitter/_source/1/?_source_includes=*.id&_source_excludes=entities --------------------------------------------------- -// CONSOLE -// TEST[continued] - -Note, there is also a HEAD variant for the _source endpoint to efficiently test for document _source existence. -An existing document will not have a _source if it is disabled in the <>. - -[source,js] --------------------------------------------------- -HEAD twitter/_source/1 --------------------------------------------------- -// CONSOLE -// TEST[continued] - -[float] -[[get-routing]] -==== Routing - -When indexing using the ability to control the routing, in order to get -a document, the routing value should also be provided. 
For example:
-
-[source,js]
--------------------------------------------------
-GET twitter/_doc/2?routing=user1
--------------------------------------------------
-// CONSOLE
-// TEST[continued]
-
-The above will get a tweet with id `2`, but will be routed based on the
-user. Note that issuing a get without the correct routing will cause the
-document not to be fetched.
-
-[float]
-[[preference]]
-==== Preference
-
-Controls a `preference` of which shard replicas to execute the get
-request on. By default, the operation is randomized between the shard
-replicas.
-
-The `preference` can be set to:
-
-`_local`::
- The operation will prefer to be executed on a local
- allocated shard if possible.
-
-Custom (string) value::
- A custom value will be used to guarantee that
- the same shards will be used for the same custom value. This can help
- with "jumping values" when hitting different shards in different refresh
- states. A sample value can be something like the web session id, or the
- user name.
-
-[float]
-[[get-refresh]]
-==== Refresh
-
-The `refresh` parameter can be set to `true` in order to refresh the
-relevant shard before the get operation and make it searchable. Setting
-it to `true` should be done after careful thought and verification that
-this does not cause a heavy load on the system (and slows down
-indexing).
-
-[float]
-[[get-distributed]]
-==== Distributed
-
-The get operation gets hashed into a specific shard id. It then gets
-redirected to one of the replicas within that shard id and returns the
-result. The replicas are the primary shard and its replicas within that
-shard id group. This means that the more replicas we have, the
-better GET scaling we will have.
-
-
-[float]
-[[get-versioning]]
-==== Versioning support
-
-You can use the `version` parameter to retrieve the document only if
-its current version is equal to the specified one. This behavior is the same
-for all version types with the exception of version type `FORCE` which always
-retrieves the document. Note that `FORCE` version type is deprecated.
-
-Internally, Elasticsearch has marked the old document as deleted and added an
-entirely new document. The old version of the document doesn’t disappear
-immediately, although you won’t be able to access it. Elasticsearch cleans up
-deleted documents in the background as you continue to index more data.
+Only leaf fields can be retrieved with the `stored_fields` option. Object fields
+can't be returned; if specified, the request fails.
diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc
index 2ecd929ef55..5bbfaf4bf0c 100644
--- a/docs/reference/docs/index_.asciidoc
+++ b/docs/reference/docs/index_.asciidoc
@@ -1,77 +1,144 @@
[[docs-index_]]
=== Index API
+++++
+Index
+++++
IMPORTANT: See <>.
-The index API adds or updates a JSON document in a specific index,
-making it searchable. The following example inserts the JSON document
-into the "twitter" index with an id of 1:
+Adds a JSON document to the specified index and makes
+it searchable. If the document already exists,
+updates the document and increments its version.
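+
+For example, if document `1` already exists, repeating an index request for it returns an incremented version. The following sketch reuses the request from the examples later on this page; the described response values are illustrative and the snippet is not tested:
+
+[source,js]
+--------------------------------------------------
+PUT twitter/_doc/1
+{
+  "user" : "kimchy",
+  "post_date" : "2009-11-15T14:12:12",
+  "message" : "trying out Elasticsearch"
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+On the second identical request, the response contains `"_version" : 2` and `"result" : "updated"` instead of `"created"`.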
-[source,js]
--------------------------------------------------
-PUT twitter/_doc/1
-{
-  "user" : "kimchy",
-  "post_date" : "2009-11-15T14:12:12",
-  "message" : "trying out Elasticsearch"
-}
--------------------------------------------------
-// CONSOLE
+[[docs-index-api-request]]
+==== {api-request-title}
-The result of the above index operation is:
+`PUT /<index>/_doc/<_id>`
-[source,js]
--------------------------------------------------
-{
-  "_shards" : {
-    "total" : 2,
-    "failed" : 0,
-    "successful" : 2
-  },
-  "_index" : "twitter",
-  "_type" : "_doc",
-  "_id" : "1",
-  "_version" : 1,
-  "_seq_no" : 0,
-  "_primary_term" : 1,
-  "result" : "created"
-}
--------------------------------------------------
-// TESTRESPONSE[s/"successful" : 2/"successful" : 1/]
+`POST /<index>/_doc/`
-The `_shards` header provides information about the replication process of the index operation:
+`PUT /<index>/_create/<_id>`
-`total`:: Indicates how many shard copies (primary and replica shards) the index operation should be executed on.
-`successful`:: Indicates the number of shard copies the index operation succeeded on.
-`failed`:: An array that contains replication-related errors in the case an index operation failed on a replica shard.
+`POST /<index>/_create/<_id>`
-The index operation is successful in the case `successful` is at least 1.
+[[docs-index-api-path-params]]
+==== {api-path-parms-title}
-NOTE: Replica shards may not all be started when an indexing operation successfully returns (by default, only the
- primary is required, but this behavior can be <>). In that case,
- `total` will be equal to the total shards based on the `number_of_replicas` setting and `successful` will be
- equal to the number of shards started (primary plus replicas). If there were no failures, the `failed` will be 0.
+`<index>`::
+(Required, string) Name of the target index. By default, the index is created
+automatically if it doesn't exist. For more information, see <>.
+
+`<_id>`::
+(Optional, string) Unique identifier for the document. Required if you are
+using a PUT request. Omit to automatically generate an ID when using a
+POST request.
+
+
+[[docs-index-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term]
+
+`op_type`::
+(Optional, enum) Set to `create` to only index the document
+if it does not already exist (_put if absent_). If a document with the specified
+`_id` already exists, the indexing operation will fail. Same as using the
+`<index>/_create` endpoint. Valid values: `index`, `create`. Default: `index`.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-pipeline]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-version-type]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards]
+
+[[docs-index-api-request-body]]
+==== {api-request-body-title}
+
+`<source>`::
+(Required, string) Request body contains the JSON source for the document
+data.
+
+[[docs-index-api-response-body]]
+==== {api-response-body-title}
+
+`_shards`::
+Provides information about the replication process of the index operation.
+
+`_shards.total`::
+Indicates how many shard copies (primary and replica shards) the index operation
+should be executed on.
+
+`_shards.successful`::
+Indicates the number of shard copies the index operation succeeded on.
+When the index operation is successful, `successful` is at least 1.
++
+NOTE: Replica shards might not all be started when an indexing operation
+returns successfully; by default, only the primary is required. Set
+`wait_for_active_shards` to change this default behavior. See
+<>.
+
+`_shards.failed`::
+An array that contains replication-related errors in the case an index operation
+failed on a replica shard. 0 indicates there were no failures.
+
+`_index`::
+The name of the index the document was added to.
+
+`_type`::
+The document type. {es} indices now support a single document type, `_doc`.
+
+`_id`::
+The unique identifier for the added document.
+
+`_version`::
+The document version. Incremented each time the document is updated.
+
+`_seq_no`::
+The sequence number assigned to the document for the indexing operation.
+Sequence numbers are used to ensure an older version of a document
+doesn’t overwrite a newer version. See <>.
+
+`_primary_term`::
+The primary term assigned to the document for the indexing operation.
+See <>.
+
+`result`::
+The result of the indexing operation, `created` or `updated`.
+
+[[docs-index-api-desc]]
+==== {api-description-title}
+
+You can index a new JSON document with the `_doc` or `_create` resource. Using
+`_create` guarantees that the document is only indexed if it does not already
+exist. To update an existing document, you must use the `_doc` resource.
-[float]
[[index-creation]]
-==== Automatic Index Creation
+===== Create indices automatically
-The index operation automatically creates an index if it does not already
-exist, and applies any <> that are
-configured. The index operation also creates a dynamic mapping if one does not
-already exist. By default, new fields and objects will automatically be added
-to the mapping definition if needed. Check out the <> section
-for more information on mapping definitions, and the
-<> API for information about updating mappings
-manually.
+If the specified index does not already exist, by default the index operation
+automatically creates it and applies any configured
+<>. If no mapping exists, the index operation
+creates a dynamic mapping. By default, new fields and objects are
+automatically added to the mapping if needed. For more information about field
+mapping, see <> and the <> API.
Automatic index creation is controlled by the `action.auto_create_index`
-setting. This setting defaults to `true`, meaning that indices are always
-automatically created. Automatic index creation can be permitted only for
-indices matching certain patterns by changing the value of this setting to a
-comma-separated list of these patterns. It can also be explicitly permitted and
-forbidden by prefixing patterns in the list with a `+` or `-`. Finally it can
-be completely disabled by changing this setting to `false`.
+setting. This setting defaults to `true`, which allows any index to be created
+automatically. You can modify this setting to explicitly allow or block
+automatic creation of indices that match specified patterns, or set it to
+`false` to disable automatic index creation entirely. Specify a
+comma-separated list of patterns you want to allow, or prefix each pattern with
+`+` or `-` to indicate whether it should be allowed or blocked.
[source,js] -------------------------------------------------- @@ -98,56 +165,30 @@ PUT _cluster/settings -------------------------------------------------- // CONSOLE -<1> Permit only the auto-creation of indices called `twitter`, `index10`, no -other index matching `index1*`, and any other index matching `ind*`. The -patterns are matched in the order in which they are given. +<1> Allow auto-creation of indices called `twitter` or `index10`, block the +creation of indices that match the pattern `index1*`, and allow creation of +any other indices that match the `ind*` pattern. Patterns are matched in +the order specified. -<2> Completely disable the auto-creation of indices. +<2> Disable automatic index creation entirely. -<3> Permit the auto-creation of indices with any name. This is the default. +<3> Allow automatic creation of any index. This is the default. [float] [[operation-type]] -==== Operation Type +===== Put if absent -The index operation also accepts an `op_type` that can be used to force -a `create` operation, allowing for "put-if-absent" behavior. When -`create` is used, the index operation will fail if a document by that id +You can force a create operation by using the `_create` resource or +setting the `op_type` parameter to _create_. In this case, +the index operation fails if a document with the specified ID already exists in the index. -Here is an example of using the `op_type` parameter: - -[source,js] --------------------------------------------------- -PUT twitter/_doc/1?op_type=create -{ - "user" : "kimchy", - "post_date" : "2009-11-15T14:12:12", - "message" : "trying out Elasticsearch" -} --------------------------------------------------- -// CONSOLE - -Another option to specify `create` is to use the following uri: - -[source,js] --------------------------------------------------- -PUT twitter/_create/1 -{ - "user" : "kimchy", - "post_date" : "2009-11-15T14:12:12", - "message" : "trying out Elasticsearch" -} --------------------------------------------------- -// CONSOLE - [float] -==== Automatic ID Generation +===== Create document IDs automatically -The index operation can be executed without specifying the id. In such a -case, an id will be generated automatically. In addition, the `op_type` -will automatically be set to `create`. Here is an example (note the -*POST* used instead of *PUT*): +If you don't specify a document ID when using POST, the `op_type` is +automatically set to `create` and the index operation generates a unique ID +for the document. [source,js] -------------------------------------------------- @@ -160,7 +201,7 @@ POST twitter/_doc/ -------------------------------------------------- // CONSOLE -The result of the above index operation is: +The API returns the following result: [source,js] -------------------------------------------------- @@ -183,17 +224,17 @@ The result of the above index operation is: [float] [[optimistic-concurrency-control-index]] -==== Optimistic concurrency control +===== Optimistic concurrency control Index operations can be made conditional and only be performed if the last -modification to the document was assigned the sequence number and primary +modification to the document was assigned the sequence number and primary term specified by the `if_seq_no` and `if_primary_term` parameters. If a mismatch is detected, the operation will result in a `VersionConflictException` -and a status code of 409. See <> for more details. +and a status code of 409. See <> for more details. 
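+
+For example, a conditional index request could look like the following sketch; the `if_seq_no` and `if_primary_term` values are illustrative and must match the values returned for the current version of the document, so the snippet is not tested:
+
+[source,js]
+--------------------------------------------------
+PUT twitter/_doc/1?if_seq_no=10&if_primary_term=1
+{
+  "user" : "kimchy",
+  "message" : "applied only if the document has not changed"
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+If another write has modified the document in the meantime, the request returns a version conflict error with a status code of 409 instead of indexing the document.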
[float]
[[index-routing]]
-==== Routing
+===== Routing
By default, shard placement, or `routing`, is controlled by using a
hash of the document's id value. For more explicit control, the value
@@ -211,11 +252,11 @@ POST twitter/_doc?routing=kimchy
--------------------------------------------------
// CONSOLE
-In the example above, the "_doc" document is routed to a shard based on
+In this example, the document is routed to a shard based on
the `routing` parameter provided: "kimchy".
-When setting up explicit mapping, the `_routing` field can be optionally
-used to direct the index operation to extract the routing value from the
+When setting up explicit mapping, you can also use the `_routing` field
+to direct the index operation to extract the routing value from the
document itself. This does come at the (very minimal) cost of an
additional document parsing pass. If the `_routing` mapping is defined
and set to be `required`, the index operation will fail if no routing
@@ -223,7 +264,7 @@ value is provided or extracted.
[float]
[[index-distributed]]
-==== Distributed
+===== Distributed
The index operation is directed to the primary shard based on its route
(see the Routing section above) and performed on the actual node
@@ -232,7 +273,7 @@ if needed, the update is distributed to applicable replicas.
[float]
[[index-wait-for-active-shards]]
-==== Wait For Active Shards
+===== Active shards
To improve the resiliency of writes to the system, indexing operations
can be configured to wait for a certain number of active shard copies
@@ -290,14 +331,14 @@ replication succeeded/failed.
[float]
[[index-refresh]]
-==== Refresh
+===== Refresh
Control when the changes made by this request are visible to search. See
<>.
[float]
[[index-noop]]
-==== Noop Updates
+===== Noop updates
When updating a document using the index API a new version of the document is
always created even if the document hasn't changed. If this isn't acceptable
@@ -312,7 +353,7 @@ Elasticsearch runs on the shard receiving the updates.
[float]
[[timeout]]
-==== Timeout
+===== Timeout
The primary shard assigned to perform the index operation might not be
available when the index operation is executed. Some reasons for this
@@ -336,15 +377,15 @@ PUT twitter/_doc/1?timeout=5m
[float]
[[index-versioning]]
-==== Versioning
+===== Versioning
-Each indexed document is given a version number. By default,
+Each indexed document is given a version number. By default,
internal versioning is used that starts at 1 and increments
with each update, deletes included. Optionally, the version number can be
set to an external value (for example, if maintained in a
database). To enable this functionality, `version_type` should be set to
`external`. The value provided must be a numeric, long value greater than or equal to 0,
-and less than around 9.2e+18.
+and less than around 9.2e+18.
When using the external version type, the system checks to see if
the version number passed to the index request is greater than the
@@ -363,11 +404,12 @@ PUT twitter/_doc/1?version=2&version_type=external
// CONSOLE
// TEST[continued]
-*NOTE:* Versioning is completely real time, and is not affected by the
+NOTE: Versioning is completely real time, and is not affected by the
near real time aspects of search operations. If no version is provided,
then the operation is executed without any version checks.
-The above will succeed since the supplied version of 2 is higher than +In the previous example, the operation will succeed since the supplied +version of 2 is higher than the current document version of 1. If the document was already updated and its version was set to 2 or higher, the indexing command will fail and result in a conflict (409 http status code). @@ -381,12 +423,13 @@ latest version will be used if the index operations arrive out of order for whatever reason. [float] +[[index-version-types]] ===== Version types -Next to the `external` version type explained above, Elasticsearch -also supports other types for specific use cases. Here is an overview of -the different version types and their semantics. +In addition to the `external` version type, Elasticsearch +also supports other types for specific use cases: +[[_version_types]] `internal`:: Only index the document if the given version is identical to the version of the stored document. @@ -400,8 +443,72 @@ than the version of the stored document. If there is no existing document the operation will succeed as well. The given version will be used as the new version and will be stored with the new document. The supplied version must be a non-negative long number. -*NOTE*: The `external_gte` version type is meant for special use cases and +NOTE: The `external_gte` version type is meant for special use cases and should be used with care. If used incorrectly, it can result in loss of data. There is another option, `force`, which is deprecated because it can cause primary and replica shards to diverge. +[[docs-index-api-example]] +==== {api-examples-title} + +Insert a JSON document into the `twitter` index with an `_id` of 1: + +[source,js] +-------------------------------------------------- +PUT twitter/_doc/1 +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +-------------------------------------------------- +// CONSOLE + +The API returns the following result: + +[source,js] +-------------------------------------------------- +{ + "_shards" : { + "total" : 2, + "failed" : 0, + "successful" : 2 + }, + "_index" : "twitter", + "_type" : "_doc", + "_id" : "1", + "_version" : 1, + "_seq_no" : 0, + "_primary_term" : 1, + "result" : "created" +} +-------------------------------------------------- +// TESTRESPONSE[s/"successful" : 2/"successful" : 1/] + +Use the `_create` resource to index a document into the `twitter` index if +no document with that ID exists: + +[source,js] +-------------------------------------------------- +PUT twitter/_create/1 +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +-------------------------------------------------- +// CONSOLE + +Set the `op_type` parameter to _create_ to index a document into the `twitter` +index if no document with that ID exists: + +[source,js] +-------------------------------------------------- +PUT twitter/_doc/1?op_type=create +{ + "user" : "kimchy", + "post_date" : "2009-11-15T14:12:12", + "message" : "trying out Elasticsearch" +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index c0cc88bb4f5..1a732cf1246 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -1,18 +1,86 @@ [[docs-update]] === Update API +++++ +Update +++++ -The update API allows to update a document based on a script provided. 
-The operation gets the document (collocated with the shard) from the
-index, runs the script (with optional script language and parameters),
-and indexes back the result (also allows to delete, or ignore the
-operation).
+Updates a document using the specified script.
-Note, this operation still means full reindex of the document, it just
-removes some network roundtrips and reduces chances of version conflicts
-between the get and the index. The `_source` field needs to be enabled
-for this feature to work.
+[[docs-update-api-request]]
+==== {api-request-title}
-For example, let's index a simple doc:
+`POST /<index>/_update/<_id>`
+
+[[update-api-desc]]
+==== {api-description-title}
+
+Enables you to script document updates. The script can update, delete, or skip
+modifying the document. The update API also supports passing a partial document,
+which is merged into the existing document. To fully replace an existing
+document, use the <>.
+
+This operation:
+
+. Gets the document (collocated with the shard) from the index.
+. Runs the specified script.
+. Indexes the result.
+
+The document must still be reindexed, but using `update` removes some network
+roundtrips and reduces chances of version conflicts between the GET and the
+index operation.
+
+The `_source` field must be enabled to use `update`. In addition to `_source`,
+you can access the following variables through the `ctx` map: `_index`,
+`_type`, `_id`, `_version`, `_routing`, and `_now` (the current timestamp).
+
+[[docs-update-api-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Required, string) Name of the target index. By default, the index is created
+automatically if it doesn't exist. For more information, see <>.
+
+`<_id>`::
+(Required, string) Unique identifier for the document to be updated.
+
+[[docs-update-api-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-seq-no]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-primary-term]
+
+`lang`::
+(Optional, string) The script language. Default: `painless`.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-refresh]
+
+`retry_on_conflict`::
+(Optional, integer) Specify how many times the operation should be retried when
+a conflict occurs. Default: 0.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-routing]
+
+`_source`::
+(Optional, list) Set to `false` to disable source retrieval (default: `true`).
+You can also specify a comma-separated list of the fields you want to retrieve.
+
+`_source_excludes`::
+(Optional, list) Specify the source fields you want to exclude.
+
+`_source_includes`::
+(Optional, list) Specify the source fields you want to retrieve.
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=doc-wait-for-active-shards]
+
+[[update-api-example]]
+==== {api-examples-title}
+
+First, let's index a simple doc:
[source,js]
--------------------------------------------------
@@ -24,10 +92,8 @@ PUT test/_doc/1
--------------------------------------------------
// CONSOLE
-[float]
-==== Scripted updates
-
-Now, we can execute a script that would increment the counter:
+To increment the counter, you can submit an update request with the
+following script:
[source,js]
--------------------------------------------------
@@ -45,8 +111,8 @@ POST test/_update/1
// CONSOLE
// TEST[continued]
-We can add a tag to the list of tags (if the tag exists, it
- still gets added, since this is a list):
+Similarly, you could use an update script to add a tag to the list of tags
+(this is just a list, so the tag is added even if it already exists):
[source,js]
--------------------------------------------------
@@ -64,11 +130,11 @@ POST test/_update/1
// CONSOLE
// TEST[continued]
-We can remove a tag from the list of tags. Note that the Painless function to
-`remove` a tag takes as its parameter the array index of the element you wish
-to remove, so you need a bit more logic to locate it while avoiding a runtime
-error. Note that if the tag was present more than once in the list, this will
-remove only one occurrence of it:
+You could also remove a tag from the list of tags. The Painless
+function to `remove` a tag takes the array index of the element
+you want to remove. To avoid a possible runtime error, you first need to
+make sure the tag exists. If the list contains duplicates of the tag, this
+script just removes one occurrence.
[source,js]
--------------------------------------------------
@@ -86,11 +152,8 @@ POST test/_update/1
// CONSOLE
// TEST[continued]
-In addition to `_source`, the following variables are available through
-the `ctx` map: `_index`, `_type`, `_id`, `_version`, `_routing`,
-and `_now` (the current timestamp).
-
-We can also add a new field to the document:
+You can also add and remove fields from a document. For example, this script
+adds the field `new_field`:
[source,js]
--------------------------------------------------
@@ -102,7 +165,7 @@ POST test/_update/1
// CONSOLE
// TEST[continued]
-Or remove a field from the document:
+Conversely, this script removes the field `new_field`:
[source,js]
--------------------------------------------------
@@ -114,9 +177,9 @@ POST test/_update/1
// CONSOLE
// TEST[continued]
-And, we can even change the operation that is executed. This example deletes
-the doc if the `tags` field contains `green`, otherwise it does nothing
-(`noop`):
+Instead of updating the document, you can also change the operation that is
+executed from within the script. For example, this request deletes the doc if
+the `tags` field contains `green`, otherwise it does nothing (`noop`):
[source,js]
--------------------------------------------------
@@ -135,13 +198,8 @@ POST test/_update/1
// TEST[continued]
[float]
-==== Updates with a partial document
+===== Update part of a document
-The update API also supports passing a partial document,
-which will be merged into the existing document (simple recursive merge,
-inner merging of objects, replacing core "keys/values" and arrays).
-To fully replace the existing document, the <> should
-be used instead.
The following partial update adds a new field to the existing document:
@@ -157,14 +215,14 @@ POST test/_update/1
// CONSOLE
// TEST[continued]
-If both `doc` and `script` are specified, then `doc` is ignored. Best is
-to put your field pairs of the partial document in the script itself.
+If both `doc` and `script` are specified, then `doc` is ignored. If you
+specify a scripted update, include the fields you want to update in the script.
[float]
-==== Detecting noop updates
+===== Detect noop updates
-If `doc` is specified its value is merged with the existing `_source`.
-By default updates that don't change anything detect that they don't change anything and return `"result": "noop"` like this:
+By default, updates that don't change anything are detected and return
+`"result": "noop"`:
[source,js]
--------------------------------------------------
@@ -178,9 +236,8 @@ POST test/_update/1
// CONSOLE
// TEST[continued]
-If `name` was `new_name` before the request was sent then the entire update
-request is ignored. The `result` element in the response returns `noop` if
-the request was ignored.
+If the value of `name` is already `new_name`, the update
+request is ignored and the `result` element in the response returns `noop`:
[source,js]
--------------------------------------------------
@@ -201,7 +258,7 @@ the request was ignored.
--------------------------------------------------
// TESTRESPONSE
-You can disable this behavior by setting `"detect_noop": false` like this:
+You can disable this behavior by setting `"detect_noop": false`:
[source,js]
--------------------------------------------------
@@ -218,11 +275,11 @@ POST test/_update/1
[[upserts]]
[float]
-==== Upserts
+===== Upsert
If the document does not already exist, the contents of the `upsert` element
-will be inserted as a new document. If the document does exist, then the
-`script` will be executed instead:
+are inserted as a new document. If the document exists, the
+`script` is executed instead:
[source,js]
--------------------------------------------------
@@ -245,11 +302,10 @@ POST test/_update/1
[float]
[[scripted_upsert]]
-===== `scripted_upsert`
+===== Scripted upsert
-If you would like your script to run regardless of whether the document exists
-or not -- i.e. the script handles initializing the document instead of the
-`upsert` element -- then set `scripted_upsert` to `true`:
+To run the script whether or not the document exists, set `scripted_upsert` to
+`true`:
[source,js]
--------------------------------------------------
@@ -275,10 +331,10 @@ POST sessions/_update/dh3sgudg8gsrgl
[float]
[[doc_as_upsert]]
-===== `doc_as_upsert`
+===== Doc as upsert
-Instead of sending a partial `doc` plus an `upsert` doc, setting
-`doc_as_upsert` to `true` will use the contents of `doc` as the `upsert`
+Instead of sending a partial `doc` plus an `upsert` doc, you can set
+`doc_as_upsert` to `true` to use the contents of `doc` as the `upsert`
value:
[source,js]
@@ -293,51 +349,3 @@ POST test/_update/1
--------------------------------------------------
// CONSOLE
// TEST[continued]
-
-[float]
-==== Parameters
-
-The update operation supports the following query-string parameters:
-
-[horizontal]
-`retry_on_conflict`::
-
-In between the get and indexing phases of the update, it is possible that
-another process might have already updated the same document. By default, the
-update will fail with a version conflict exception. The `retry_on_conflict`
-parameter controls how many times to retry the update before finally throwing
-an exception.
-
-`routing`::
-
-Routing is used to route the update request to the right shard and sets the
-routing for the upsert request if the document being updated doesn't exist.
-Can't be used to update the routing of an existing document.
-
-`timeout`::
-
-Timeout waiting for a shard to become available.
-
-`wait_for_active_shards`::
-
-The number of shard copies required to be active before proceeding with the update operation.
-See <> for details.
-
-`refresh`::
-
-Control when the changes made by this request are visible to search. See
-<>.
-
-`_source`::
-
-Allows to control if and how the updated source should be returned in the response.
-By default the updated source is not returned.
-See <> for details.
-
-`if_seq_no` and `if_primary_term`::
-
-Update operations can be made conditional and only be performed if the last
-modification to the document was assigned the sequence number and primary
-term specified by the `if_seq_no` and `if_primary_term` parameters. If a
-mismatch is detected, the operation will result in a `VersionConflictException`
-and a status code of 409. See <> for more details.
\ No newline at end of file
diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc
index 00642195b89..65065946d99 100755
--- a/docs/reference/getting-started.asciidoc
+++ b/docs/reference/getting-started.asciidoc
@@ -22,7 +22,7 @@ how {es} works. If you're already familiar with {es} and want to see how it work
with the rest of the stack, you might want to jump to the
{stack-gs}/get-started-elastic-stack.html[Elastic Stack Tutorial] to see how to
set up a system monitoring solution with {es}, {kib},
-{beats}, and {ls}.
+{beats}, and {ls}.
TIP: The fastest way to get started with {es} is to
https://www.elastic.co/cloud/elasticsearch-service/signup[start a free 14-day
@@ -135,8 +135,8 @@ Windows:
The additional nodes are assigned unique IDs. Because you're running all three
nodes locally, they automatically join the cluster with the first node.
-. Use the `cat health` API to verify that your three-node cluster is up running.
-The `cat` APIs return information about your cluster and indices in a
+. Use the cat health API to verify that your three-node cluster is up and running.
+The cat APIs return information about your cluster and indices in a
format that's easier to read than raw JSON.
+
You can interact directly with your cluster by submitting HTTP requests to
@@ -155,8 +155,8 @@ GET /_cat/health?v
--------------------------------------------------
// CONSOLE
+
-The response should indicate that the status of the _elasticsearch_ cluster
-is _green_ and it has three nodes:
+The response should indicate that the status of the `elasticsearch` cluster
+is `green` and it has three nodes:
+
[source,txt]
--------------------------------------------------
@@ -191,8 +191,8 @@ Once you have a cluster up and running, you're ready to index some data. There
are a variety of ingest options for {es}, but in the end they all do the same
thing: put JSON documents into an {es} index.
-You can do this directly with a simple POST request that identifies
-the index you want to add the document to and specifies one or more
+You can do this directly with a simple PUT request that specifies
+the index you want to add the document to, a unique document ID, and one or more
`"field": "value"` pairs in the request body:
[source,js]
@@ -204,9 +204,9 @@ PUT /customer/_doc/1
--------------------------------------------------
// CONSOLE
-This request automatically creates the _customer_ index if it doesn't already
+This request automatically creates the `customer` index if it doesn't already
exist, adds a new document that has an ID of `1`, and stores and
-indexes the _name_ field.
+indexes the `name` field.
Since this is a new document, the response shows that the result of the
operation was that version 1 of the document was created:
@@ -264,46 +264,22 @@ and shows the original source fields that were indexed.
// TESTRESPONSE[s/"_seq_no" : \d+/"_seq_no" : $body._seq_no/ ]
// TESTRESPONSE[s/"_primary_term" : \d+/"_primary_term" : $body._primary_term/]
-
[float]
[[getting-started-batch-processing]]
-=== Batch processing
+=== Indexing documents in bulk
-In addition to being able to index, update, and delete individual documents, Elasticsearch also provides the ability to perform any of the above operations in batches using the {ref}/docs-bulk.html[`_bulk` API]. This functionality is important in that it provides a very efficient mechanism to do multiple operations as fast as possible with as few network roundtrips as possible.
+If you have a lot of documents to index, you can submit them in batches with
+the {ref}/docs-bulk.html[bulk API]. Using bulk to batch document
+operations is significantly faster than submitting requests individually as it minimizes network roundtrips.
-As a quick example, the following call indexes two documents (ID 1 - John Doe and ID 2 - Jane Doe) in one bulk operation:
+The optimal batch size depends on a number of factors: the document size and complexity, the indexing and search load, and the resources available to your cluster. A good place to start is with batches of 1,000 to 5,000 documents
+and a total payload between 5MB and 15MB. From there, you can experiment
+to find the sweet spot.
-[source,js]
--------------------------------------------------
-POST /customer/_bulk?pretty
-{"index":{"_id":"1"}}
-{"name": "John Doe" }
-{"index":{"_id":"2"}}
-{"name": "Jane Doe" }
--------------------------------------------------
-// CONSOLE
-
-This example updates the first document (ID of 1) and then deletes the second document (ID of 2) in one bulk operation:
-
-[source,sh]
--------------------------------------------------
-POST /customer/_bulk
-{"update":{"_id":"1"}}
-{"doc": { "name": "John Doe becomes Jane Doe" } }
-{"delete":{"_id":"2"}}
--------------------------------------------------
-// CONSOLE
-// TEST[continued]
-
-Note above that for the delete action, there is no corresponding source document after it since deletes only require the ID of the document to be deleted.
-
-The Bulk API does not fail due to failures in one of the actions. If a single action fails for whatever reason, it will continue to process the remainder of the actions after it. When the bulk API returns, it will provide a status for each action (in the same order it was sent in) so that you can check if a specific action failed or not.
-
-[float]
-=== Sample dataset
-
-Now that we've gotten a glimpse of the basics, let's try to work on a more realistic dataset. I've prepared a sample of fictitious JSON documents of customer bank account information. Each document has the following schema:
I've prepared a sample of fictitious JSON documents of customer bank account information. Each document has the following schema: +To get some data into {es} that you can start searching and analyzing: +. Download the https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[`accounts.json`] sample data set. The documents in this randomly-generated data set represent user accounts with the following information: ++ [source,js] -------------------------------------------------- { @@ -322,21 +298,19 @@ Now that we've gotten a glimpse of the basics, let's try to work on a more reali -------------------------------------------------- // NOTCONSOLE -For the curious, this data was generated using http://www.json-generator.com/[`www.json-generator.com/`], so please ignore the actual values and semantics of the data as these are all randomly generated. - -You can download the sample dataset (accounts.json) from https://github.com/elastic/elasticsearch/blob/master/docs/src/test/resources/accounts.json?raw=true[here]. Extract it to our current directory and let's load it into our cluster as follows: - +. Index the account data into the `bank` index with the following `_bulk` request: ++ [source,sh] -------------------------------------------------- curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_bulk?pretty&refresh" --data-binary "@accounts.json" curl "localhost:9200/_cat/indices?v" -------------------------------------------------- // NOTCONSOLE - ++ //// This replicates the above in a document-testing friendly way but isn't visible in the docs: - ++ [source,js] -------------------------------------------------- GET /_cat/indices?v @@ -344,9 +318,9 @@ GET /_cat/indices?v // CONSOLE // TEST[setup:bank] //// - -And the response: - ++ +The response indicates that 1,000 documents were indexed successfully. ++ [source,txt] -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size @@ -355,8 +329,6 @@ yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 12 // TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/] // TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ non_json] -Which means that we just successfully bulk indexed 1000 documents into the bank index. - [[getting-started-search]] == Start searching diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index c01671b4ae6..414ac59f0ba 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -76,12 +76,23 @@ commit point. Defaults to `512mb`. `index.translog.retention.size`:: -The total size of translog files to keep. Keeping more translog files increases -the chance of performing an operation based sync when recovering replicas. If -the translog files are not sufficient, replica recovery will fall back to a -file based sync. Defaults to `512mb` +When soft deletes are disabled (they are enabled by default in 7.0 and later), +`index.translog.retention.size` controls the total size of translog files to keep. +Keeping more translog files increases the chance of performing an operation based +sync when recovering replicas. If the translog files are not sufficient, +replica recovery will fall back to a file based sync. Defaults to `512mb`. + +Neither `index.translog.retention.size` nor `index.translog.retention.age` should +be specified unless soft deletes are disabled, as they will be ignored otherwise.
`index.translog.retention.age`:: -The maximum duration for which translog files will be kept. Defaults to `12h`. +When soft deletes are disabled (they are enabled by default in 7.0 and later), +`index.translog.retention.age` controls the maximum duration for which translog +files are kept. Keeping more translog files increases the chance of performing an +operation based sync when recovering replicas. If the translog files are not sufficient, +replica recovery will fall back to a file based sync. Defaults to `12h`. + +Neither `index.translog.retention.size` nor `index.translog.retention.age` should +be specified unless soft deletes are disabled, as they will be ignored otherwise. diff --git a/docs/reference/indices/get-mapping.asciidoc b/docs/reference/indices/get-mapping.asciidoc index 69d4bb62233..ea96327bb3a 100644 --- a/docs/reference/indices/get-mapping.asciidoc +++ b/docs/reference/indices/get-mapping.asciidoc @@ -1,8 +1,10 @@ [[indices-get-mapping]] -=== Get Mapping +=== Get mapping API +++++ +Get mapping +++++ -The get mapping API allows to retrieve mapping definitions for an index or -index/type. +Retrieves <> for indices in a cluster. [source,js] -------------------------------------------------- GET /twitter/_mapping @@ -13,10 +15,46 @@ GET /twitter/_mapping NOTE: Before 7.0.0, the 'mappings' definition used to include a type name. Although mappings in responses no longer contain a type name by default, you can still request the old format -through the parameter include_type_name. For more details, please see <>. +through the parameter `include_type_name`. For more details, please see <>. -[float] -==== Multiple Indices + +[[get-mapping-api-request]] +==== {api-request-title} + +`GET /_mapping` + +`GET /{index}/_mapping` + + +[[get-mapping-api-path-params]] +==== {api-path-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index] + + +[[get-mapping-api-query-params]] +==== {api-query-parms-title} + +include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] ++ +Defaults to `open`. + +include::{docdir}/rest-api/common-parms.asciidoc[tag=include-type-name] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=local] + +include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] + + +[[get-mapping-api-example]] +==== {api-examples-title} + +[[get-mapping-api-multi-ex]] +===== Multiple indices The get mapping API can be used to get more than one index with a single call. General usage of the API follows the following syntax: diff --git a/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc index 9f1f77052d6..4393a3365fe 100644 --- a/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/estimate-memory-usage-dfanalytics.asciidoc @@ -42,14 +42,14 @@ Serves as an advice on how to set `model_memory_limit` when creating {dfanalytic [[ml-estimate-memory-usage-dfanalytics-results]] ==== {api-response-body-title} -`expected_memory_usage_with_one_partition`:: +`expected_memory_without_disk`:: (string) Estimated memory usage under the assumption that the whole {dfanalytics} should happen in memory (i.e. without overflowing to disk).
-`expected_memory_usage_with_max_partitions`:: +`expected_memory_with_disk`:: (string) Estimated memory usage under the assumption that overflowing to disk is allowed during {dfanalytics}. - `expected_memory_usage_with_max_partitions` is usually smaller than `expected_memory_usage_with_one_partition` - as using disk allows to limit the main memory needed to perform {dfanalytics}. + `expected_memory_with_disk` is usually smaller than `expected_memory_without_disk` as using disk allows + limiting the main memory needed to perform {dfanalytics}. [[ml-estimate-memory-usage-dfanalytics-example]] ==== {api-examples-title} @@ -76,8 +76,8 @@ The API returns the following results: [source,js] ---- { - "expected_memory_usage_with_one_partition": "128MB", - "expected_memory_usage_with_max_partitions": "32MB" + "expected_memory_without_disk": "128MB", + "expected_memory_with_disk": "32MB" } ---- // TESTRESPONSE \ No newline at end of file diff --git a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc index 10c6e1c0bca..92729c3b0e2 100644 --- a/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc +++ b/docs/reference/ml/df-analytics/apis/evaluate-dfanalytics.asciidoc @@ -43,7 +43,13 @@ packages together commonly used metrics for various analyses. `index`:: (Required, object) Defines the `index` in which the evaluation will be performed. - + +`query`:: + (Optional, object) A query clause, written in the {es} query domain-specific language (DSL), + used to select data from the index. This value corresponds to the query + object in an {es} search POST body. Defaults to `{"match_all": {}}`. + `evaluation`:: (Required, object) Defines the type of evaluation you want to perform. For example: `binary_soft_classification`. See <>. diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 4da16eb9e78..b566911c6e0 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -332,6 +332,42 @@ POST /_snapshot/my_unverified_backup/_verify It returns a list of nodes where repository was successfully verified or an error message if verification process failed. +[float] +===== Repository Cleanup +Over time, repositories can accumulate data that is not referenced by any existing snapshot. This is a result of the data safety guarantees +the snapshot functionality provides in failure scenarios during snapshot creation, and of the decentralized nature of the snapshot creation +process. This unreferenced data in no way negatively impacts the performance or safety of a snapshot repository, but it leads to higher +than necessary storage use. To clean up this unreferenced data, call the cleanup endpoint for a repository, which will +trigger a complete accounting of the repository's contents and the subsequent deletion of all unreferenced data that was found.
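+
+For illustration, the same cleanup can also be triggered from the Java high-level REST client. The
+following is a minimal sketch, assuming the client's snapshot-cleanup support added elsewhere in this
+change; `client` is an existing `RestHighLevelClient` instance, and imports and error handling are
+omitted. The equivalent raw REST request and response follow below.
+
+[source,java]
+--------------------------------------------------
+// Sketch: clean up unreferenced data in the repository named "my_repository".
+CleanupRepositoryRequest request = new CleanupRepositoryRequest("my_repository");
+CleanupRepositoryResponse response =
+    client.snapshot().cleanupRepository(request, RequestOptions.DEFAULT);
+--------------------------------------------------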
+ +[source,js] +----------------------------------- +POST /_snapshot/my_repository/_cleanup +----------------------------------- +// CONSOLE +// TEST[continued] + +The response to a cleanup request looks as follows: + +[source,js] +-------------------------------------------------- +{ + "results": { + "deleted_bytes": 20, + "deleted_blobs": 5 + } +} +-------------------------------------------------- +// TESTRESPONSE + +Depending on the concrete repository implementation, the reported numbers of deleted bytes and deleted blobs will either +be an approximation or an exact result. Any non-zero value for the number of blobs removed implies that unreferenced blobs were found and +subsequently cleaned up. + +Note that most of the cleanup operations executed by this endpoint are automatically executed when deleting any snapshot from a +repository. If you delete snapshots regularly, you will in most cases see little or no space savings from using this functionality +and should invoke it less frequently accordingly. + [float] [[snapshots-take-snapshot]] === Snapshot diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index a894ef0dae2..575ce7c3cf9 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -4,13 +4,152 @@ Match ++++ +Returns documents that match a provided text, number, date or boolean value. The +provided text is analyzed before matching. -`match` queries accept text/numerics/dates, analyzes -them, and constructs a query. For example: +The `match` query is the standard query for performing a full-text search, +including options for fuzzy matching. + + +[[match-query-ex-request]] +==== Example request [source,js] -------------------------------------------------- GET /_search +{ + "query": { + "match" : { + "message" : { + "query" : "this is a test" + } + } + } +} +-------------------------------------------------- +// CONSOLE + + +[[match-top-level-params]] +==== Top-level parameters for `match` + +`<field>`:: +(Required, object) Field you wish to search. + + +[[match-field-params]] +==== Parameters for `<field>` +`query`:: ++ +-- +(Required) Text, number, boolean value or date you wish to find in the provided +`<field>`. + +The `match` query <> any provided text before performing a +search. This means the `match` query can search <> fields for +analyzed tokens rather than an exact term. +-- + +`analyzer`:: +(Optional, string) <> used to convert the text in the `query` +value into tokens. Defaults to the <> mapped for the `<field>`. If no analyzer is mapped, the index's +default analyzer is used. + +`auto_generate_synonyms_phrase_query`:: ++ +-- +(Optional, boolean) If `true`, <> +queries are automatically created for multi-term synonyms. Defaults to `true`. + +See <> for an +example. +-- + +`fuzziness`:: +(Optional, string) Maximum edit distance allowed for matching. See <> +for valid values and more information. See <> +for an example. + +`max_expansions`:: +(Optional, integer) Maximum number of terms to which the query will +expand. Defaults to `50`. + +`prefix_length`:: +(Optional, integer) Number of beginning characters left unchanged for fuzzy +matching. Defaults to `0`. + +`transpositions`:: +(Optional, boolean) If `true`, edits for fuzzy matching include +transpositions of two adjacent characters (ab → ba). Defaults to `true`. + +`fuzzy_rewrite`:: ++ +-- +(Optional, string) Method used to rewrite the query.
See the +<> for valid values and more +information. + +If the `fuzziness` parameter is not `0`, the `match` query uses a `rewrite` +method of `top_terms_blended_freqs_${max_expansions}` by default. +-- + +`lenient`:: +(Optional, boolean) If `true`, format-based errors, such as providing a text +`query` value for a <> field, are ignored. Defaults to `false`. + +`operator`:: ++ +-- +(Optional, string) Boolean logic used to interpret text in the `query` value. +Valid values are: + +`OR` (Default):: +For example, a `query` value of `capital of Hungary` is interpreted as `capital +OR of OR Hungary`. + +`AND`:: +For example, a `query` value of `capital of Hungary` is interpreted as `capital +AND of AND Hungary`. +-- + +`minimum_should_match`:: ++ +-- +(Optional, string) Minimum number of clauses that must match for a document to +be returned. See the <> for valid values and more information. +-- + +`zero_terms_query`:: ++ +-- +(Optional, string) Indicates whether no documents are returned if the `analyzer` +removes all tokens, such as when using a `stop` filter. Valid values are: + +`none` (Default):: +No documents are returned if the `analyzer` removes all tokens. + +`all`:: +Returns all documents, similar to a <> +query. + +See <> for an example. +-- + + +[[match-query-notes]] +==== Notes + +[[query-dsl-match-query-short-ex]] +===== Short request example + +You can simplify the match query syntax by combining the `<field>` and `query` +parameters. For example: + +[source,js] +---- +GET /_search { "query": { "match" : { @@ -18,23 +157,38 @@ GET /_search } } } --------------------------------------------------- +---- // CONSOLE -Note, `message` is the name of a field, you can substitute the name of -any field instead. - [[query-dsl-match-query-boolean]] -==== match +===== How the match query works The `match` query is of type `boolean`. It means that the text provided is analyzed and the analysis process constructs a boolean query -from the provided text. The `operator` flag can be set to `or` or `and` +from the provided text. The `operator` parameter can be set to `or` or `and` to control the boolean clauses (defaults to `or`). The minimum number of optional `should` clauses to match can be set using the <> parameter. +Here is an example with the `operator` parameter: + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "match" : { + "message" : { + "query" : "this is a test", + "operator" : "and" + } + } + } +} +-------------------------------------------------- +// CONSOLE + The `analyzer` can be set to control which analyzer will perform the analysis process on the text. It defaults to the field explicit mapping definition, or the default search analyzer. @@ -44,7 +198,7 @@ data-type mismatches, such as trying to query a numeric field with a text query string. Defaults to `false`. [[query-dsl-match-query-fuzziness]] -===== Fuzziness +===== Fuzziness in the match query `fuzziness` allows _fuzzy matching_ based on the type of field being queried. See <> for allowed settings. diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 84e4c57e99f..2886fa2ea74 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -1,9 +1,38 @@ +tag::allow-no-indices[] +`allow_no_indices`:: +(Optional, boolean) If `true`, the request returns an error if a wildcard +expression or `_all` value retrieves only missing or closed indices.
This +parameter also applies to <> that point to a +missing or closed index. +end::allow-no-indices[] + tag::bytes[] `bytes`:: (Optional, <>) Unit used to display byte values. end::bytes[] +tag::expand-wildcards[] +`expand_wildcards`:: ++ +-- +(Optional, string) Controls the kinds of indices that wildcard +expressions can expand to. Valid values are: + +`all`:: +Expand to open and closed indices. + +`open`:: +Expand only to open indices. + +`closed`:: +Expand only to closed indices. + +`none`:: +Wildcard expressions are not accepted. +-- +end::expand-wildcards[] + tag::cat-h[] `h`:: (Optional, string) Comma-separated list of column names to display. @@ -11,7 +40,7 @@ end::cat-h[] tag::flat-settings[] `flat_settings`:: -(Optional, boolean) If `true`, returns settings in flat format. Defaults to +(Optional, boolean) If `true`, returns settings in flat format. Defaults to `false`. end::flat-settings[] @@ -28,6 +57,19 @@ https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html[HTTP accept header]. Valid values include JSON, YAML, etc. end::http-format[] +tag::include-type-name[] +`include_type_name`:: +deprecated:[7.0.0, Mapping types have been deprecated. See <>.] +(Optional, boolean) If `true`, a mapping type is expected in the body of +mappings. Defaults to `false`. +end::include-type-name[] + +tag::index-ignore-unavailable[] +`ignore_unavailable`:: +(Optional, boolean) If `true`, missing or closed indices are not included in the +response. Defaults to `false`. +end::index-ignore-unavailable[] + tag::include-unloaded-segments[] `include_unloaded_segments`:: (Optional, boolean) If `true`, the response includes information from segments @@ -70,12 +112,65 @@ tag::cat-v[] to `false`. end::cat-v[] +tag::doc-pipeline[] +`pipeline`:: +(Optional, string) ID of the pipeline to use to preprocess incoming documents. +end::doc-pipeline[] + +tag::doc-refresh[] +`refresh`:: +(Optional, enum) If `true`, {es} refreshes the affected shards to make this +operation visible to search; if `wait_for`, waits for a refresh to make +this operation visible to search; if `false`, does nothing with refreshes. +Valid values: `true`, `false`, `wait_for`. Default: `false`. +end::doc-refresh[] + +tag::doc-seq-no[] +`if_seq_no`:: +(Optional, integer) Only perform the operation if the document has this +sequence number. See <>. +end::doc-seq-no[] + +tag::doc-primary-term[] +`if_primary_term`:: +(Optional, integer) Only perform the operation if the document has +this primary term. See <>. +end::doc-primary-term[] + +tag::doc-routing[] +`routing`:: +(Optional, string) Target the specified primary shard. +end::doc-routing[] + +tag::doc-version[] +`version`:: +(Optional, integer) Explicit version number for concurrency control. +The specified version must match the current version of the document for the +request to succeed. +end::doc-version[] + +tag::doc-version-type[] +`version_type`:: +(Optional, enum) Specific version type: `internal`, `external`, +`external_gte`, `force`. +end::doc-version-type[] + +tag::doc-wait-for-active-shards[] +`wait_for_active_shards`:: +(Optional, string) The number of shard copies that must be active before +proceeding with the operation. Set to `all` or any positive integer up +to the total number of shards in the index (`number_of_replicas+1`). +Default: 1, the primary shard. +end::doc-wait-for-active-shards[] + tag::timeoutparms[] +tag::timeout[] `timeout`:: (Optional, <>) Specifies the period of time to wait for a response.
If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. +end::timeout[] tag::master-timeout[] `master_timeout`:: @@ -84,4 +179,4 @@ a connection to the master node. If no response is received before the timeout expires, the request fails and returns an error. Defaults to `30s`. end::master-timeout[] -end::timeoutparms[] \ No newline at end of file +end::timeoutparms[] diff --git a/docs/reference/scripting/using.asciidoc b/docs/reference/scripting/using.asciidoc index a9646f2f3c8..5060c7fc67d 100644 --- a/docs/reference/scripting/using.asciidoc +++ b/docs/reference/scripting/using.asciidoc @@ -195,6 +195,21 @@ DELETE _scripts/calculate-score // CONSOLE // TEST[continued] +[float] +[[modules-scripting-search-templates]] +=== Search templates +You can also use the `_scripts` API to store **search templates**. Search +templates save specific <> with placeholder +values, called template parameters. + +You can use stored search templates to run searches without writing out the +entire query. Just provide the stored template's ID and the template parameters. +This is useful when you want to run a commonly used query quickly and without +mistakes. + +Search templates use the http://mustache.github.io/mustache.5.html[mustache +templating language]. See <> for more information and examples. + [float] [[modules-scripting-using-caching]] === Script caching diff --git a/docs/reference/search/request/from-size.asciidoc b/docs/reference/search/request/from-size.asciidoc index 04befada139..60ecd9ac705 100644 --- a/docs/reference/search/request/from-size.asciidoc +++ b/docs/reference/search/request/from-size.asciidoc @@ -24,5 +24,11 @@ GET /_search Note that `from` + `size` can not be more than the `index.max_result_window` -index setting which defaults to 10,000. See the <> or <> -API for more efficient ways to do deep scrolling. +index setting, which defaults to 10,000. + +WARNING: {es} uses Lucene's internal doc IDs as tie-breakers. These internal +doc IDs can be completely different across replicas of the same +data. When paginating, you might occasionally see that documents with the same +sort values are not ordered consistently. For deep scrolling, it is more +efficient to use the <> or +<> APIs. diff --git a/docs/reference/search/search-template.asciidoc b/docs/reference/search/search-template.asciidoc index f68a71c10c2..950477aa7d3 100644 --- a/docs/reference/search/search-template.asciidoc +++ b/docs/reference/search/search-template.asciidoc @@ -32,7 +32,209 @@ disable scripts per type and context as described in the <> [float] -==== More template examples +==== Examples + +[float] +[[pre-registered-templates]] +===== Store a search template + +You can store a search template using the stored scripts API. + +[source,js] +------------------------------------------ +POST _scripts/ +{ + "script": { + "lang": "mustache", + "source": { + "query": { + "match": { + "title": "{{query_string}}" + } + } + } + } +} +------------------------------------------ +// CONSOLE +// TEST[continued] + +////////////////////////// + +We want to be sure that the template has been created, +because we'll use it later. 
+ +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true +} +-------------------------------------------------- +// TESTRESPONSE + +////////////////////////// + +This template can be retrieved by + +[source,js] +------------------------------------------ +GET _scripts/ +------------------------------------------ +// CONSOLE +// TEST[continued] + +which is rendered as: + +[source,js] +------------------------------------------ +{ + "script" : { + "lang" : "mustache", + "source" : "{\"query\":{\"match\":{\"title\":\"{{query_string}}\"}}}", + "options": { + "content_type" : "application/json; charset=UTF-8" + } + }, + "_id": "", + "found": true +} +------------------------------------------ +// TESTRESPONSE + +This template can be deleted by + +[source,js] +------------------------------------------ +DELETE _scripts/ +------------------------------------------ +// CONSOLE +// TEST[continued] + +////////////////////////// + +We want to be sure that the template has been created, +because we'll use it later. + +[source,js] +-------------------------------------------------- +{ + "acknowledged" : true +} +-------------------------------------------------- +// TESTRESPONSE + +////////////////////////// + +[float] +[[use-registered-templates]] +===== Use a stored search template + +To use a stored template at search time use: + +[source,js] +------------------------------------------ +GET _search/template +{ + "id": "", <1> + "params": { + "query_string": "search for these words" + } +} +------------------------------------------ +// CONSOLE +// TEST[catch:missing] +<1> Name of the stored template script. + +[float] +[[_validating_templates]] +==== Validate a search template + +A template can be rendered in a response with given parameters using + +[source,js] +------------------------------------------ +GET _render/template +{ + "source": "{ \"query\": { \"terms\": {{#toJson}}statuses{{/toJson}} }}", + "params": { + "statuses" : { + "status": [ "pending", "published" ] + } + } +} +------------------------------------------ +// CONSOLE + +This call will return the rendered template: + +[source,js] +------------------------------------------ +{ + "template_output": { + "query": { + "terms": { + "status": [ <1> + "pending", + "published" + ] + } + } + } +} +------------------------------------------ +// TESTRESPONSE +<1> `status` array has been populated with values from the `params` object. + +Stored templates can also be rendered using + +[source,js] +------------------------------------------ +GET _render/template/ +{ + "params": { + "..." 
+ } +} +------------------------------------------ +// NOTCONSOLE + +[float] +===== Explain + +You can use the `explain` parameter when running a template: + +[source,js] +------------------------------------------ +GET _search/template +{ + "id": "my_template", + "params": { + "status": [ "pending", "published" ] + }, + "explain": true +} +------------------------------------------ +// CONSOLE +// TEST[catch:missing] + +[float] +===== Profiling + +You can use the `profile` parameter when running a template: + +[source,js] +------------------------------------------ +GET _search/template +{ + "id": "my_template", + "params": { + "status": [ "pending", "published" ] + }, + "profile": true +} +------------------------------------------ +// CONSOLE +// TEST[catch:missing] [float] ===== Filling in a query string with a single value @@ -397,204 +599,6 @@ The previous query will be rendered as: ------------------------------------------ // TESTRESPONSE - -[float] -[[pre-registered-templates]] -===== Pre-registered template - -You can register search templates by using the stored scripts api. - -[source,js] ------------------------------------------- -POST _scripts/ -{ - "script": { - "lang": "mustache", - "source": { - "query": { - "match": { - "title": "{{query_string}}" - } - } - } - } -} ------------------------------------------- -// CONSOLE -// TEST[continued] - -////////////////////////// - -We want to be sure that the template has been created, -because we'll use it later. - -[source,js] --------------------------------------------------- -{ - "acknowledged" : true -} --------------------------------------------------- -// TESTRESPONSE - -////////////////////////// - -This template can be retrieved by - -[source,js] ------------------------------------------- -GET _scripts/ ------------------------------------------- -// CONSOLE -// TEST[continued] - -which is rendered as: - -[source,js] ------------------------------------------- -{ - "script" : { - "lang" : "mustache", - "source" : "{\"query\":{\"match\":{\"title\":\"{{query_string}}\"}}}", - "options": { - "content_type" : "application/json; charset=UTF-8" - } - }, - "_id": "", - "found": true -} ------------------------------------------- -// TESTRESPONSE - -This template can be deleted by - -[source,js] ------------------------------------------- -DELETE _scripts/ ------------------------------------------- -// CONSOLE -// TEST[continued] - -////////////////////////// - -We want to be sure that the template has been created, -because we'll use it later. - -[source,js] --------------------------------------------------- -{ - "acknowledged" : true -} --------------------------------------------------- -// TESTRESPONSE - -////////////////////////// - -To use a stored template at search time use: - -[source,js] ------------------------------------------- -GET _search/template -{ - "id": "", <1> - "params": { - "query_string": "search for these words" - } -} ------------------------------------------- -// CONSOLE -// TEST[catch:missing] -<1> Name of the stored template script.
- -[float] -==== Validating templates - -A template can be rendered in a response with given parameters using - -[source,js] ------------------------------------------- -GET _render/template -{ - "source": "{ \"query\": { \"terms\": {{#toJson}}statuses{{/toJson}} }}", - "params": { - "statuses" : { - "status": [ "pending", "published" ] - } - } -} ------------------------------------------- -// CONSOLE - -This call will return the rendered template: - -[source,js] ------------------------------------------- -{ - "template_output": { - "query": { - "terms": { - "status": [ <1> - "pending", - "published" - ] - } - } - } -} ------------------------------------------- -// TESTRESPONSE -<1> `status` array has been populated with values from the `params` object. - -Pre-registered templates can also be rendered using - -[source,js] ------------------------------------------- -GET _render/template/ -{ - "params": { - "..." - } -} ------------------------------------------- -// NOTCONSOLE - -[float] -===== Explain - -You can use `explain` parameter when running a template: - -[source,js] ------------------------------------------- -GET _search/template -{ - "id": "my_template", - "params": { - "status": [ "pending", "published" ] - }, - "explain": true -} ------------------------------------------- -// CONSOLE -// TEST[catch:missing] - -[float] -===== Profiling - -You can use `profile` parameter when running a template: - -[source,js] ------------------------------------------- -GET _search/template -{ - "id": "my_template", - "params": { - "status": [ "pending", "published" ] - }, - "profile": true -} ------------------------------------------- -// CONSOLE -// TEST[catch:missing] - [[multi-search-template]] === Multi Search Template diff --git a/docs/reference/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc index a2eb84bc211..97e51131606 100644 --- a/docs/reference/settings/notification-settings.asciidoc +++ b/docs/reference/settings/notification-settings.asciidoc @@ -76,7 +76,7 @@ corresponding endpoints are whitelisted as well. [[ssl-notification-settings]] :ssl-prefix: xpack.http -:component: {watcher} +:component: {watcher} HTTP :verifies: :server!: :ssl-context: watcher @@ -215,6 +215,15 @@ HTML feature groups>>. Set to `false` to completely disable HTML sanitation. Not recommended. Defaults to `true`. +[[ssl-notification-smtp-settings]] +:ssl-prefix: xpack.notification.email +:component: {watcher} Email +:verifies: +:server!: +:ssl-context: watcher-email + +include::ssl-settings.asciidoc[] + [float] [[slack-notification-settings]] ==== Slack Notification Settings @@ -334,4 +343,4 @@ The default event type. Valid values: `trigger`,`resolve`, `acknowledge`. `attach_payload`:: Whether or not to provide the watch payload as context for the event by default. Valid values: `true`, `false`. 
--- \ No newline at end of file +-- diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java index 34ede7ccf94..2a270153f47 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/Command.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java @@ -73,7 +73,7 @@ public abstract class Command implements Closeable { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw)) { e.printStackTrace(pw); - terminal.println(sw.toString()); + terminal.errorPrintln(sw.toString()); } catch (final IOException impossible) { // StringWriter#close declares a checked IOException from the Closeable interface but the Javadocs for StringWriter // say that an exception here is impossible @@ -89,14 +89,15 @@ public abstract class Command implements Closeable { try { mainWithoutErrorHandling(args, terminal); } catch (OptionException e) { - printHelp(terminal); - terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + // print help to stderr on exceptions + printHelp(terminal, true); + terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); return ExitCodes.USAGE; } catch (UserException e) { if (e.exitCode == ExitCodes.USAGE) { - printHelp(terminal); + printHelp(terminal, true); } - terminal.println(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); + terminal.errorPrintln(Terminal.Verbosity.SILENT, "ERROR: " + e.getMessage()); return e.exitCode; } return ExitCodes.OK; @@ -109,7 +110,7 @@ public abstract class Command implements Closeable { final OptionSet options = parser.parse(args); if (options.has(helpOption)) { - printHelp(terminal); + printHelp(terminal, false); return; } @@ -125,11 +126,17 @@ public abstract class Command implements Closeable { } /** Prints a help message for the command to the terminal. */ - private void printHelp(Terminal terminal) throws IOException { - terminal.println(description); - terminal.println(""); - printAdditionalHelp(terminal); - parser.printHelpOn(terminal.getWriter()); + private void printHelp(Terminal terminal, boolean toStdError) throws IOException { + if (toStdError) { + terminal.errorPrintln(description); + terminal.errorPrintln(""); + parser.printHelpOn(terminal.getErrorWriter()); + } else { + terminal.println(description); + terminal.println(""); + printAdditionalHelp(terminal); + parser.printHelpOn(terminal.getWriter()); + } } /** Prints additional help information, specific to the command */ diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java index a0ebff5d670..718b4796c02 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java @@ -39,9 +39,17 @@ import java.util.Locale; */ public abstract class Terminal { + /** Writer to standard error - not supplied by the {@link Console} API, so we share with subclasses */ + private static final PrintWriter ERROR_WRITER = newErrorWriter(); + /** The default terminal implementation, which will be a console if available, or stdout/stderr if not. */ public static final Terminal DEFAULT = ConsoleTerminal.isSupported() ? new ConsoleTerminal() : new SystemTerminal(); + @SuppressForbidden(reason = "Writer for System.err") + private static PrintWriter newErrorWriter() { + return new PrintWriter(System.err); + } + /** Defines the available verbosity levels of messages to be printed. 
*/ public enum Verbosity { SILENT, /* always printed */ @@ -70,9 +78,14 @@ public abstract class Terminal { /** Reads password text from the terminal input. See {@link Console#readPassword()}}. */ public abstract char[] readSecret(String prompt); - /** Returns a Writer which can be used to write to the terminal directly. */ + /** Returns a Writer which can be used to write to the terminal directly using standard output. */ public abstract PrintWriter getWriter(); + /** Returns a Writer which can be used to write to the terminal directly using standard error. */ + public PrintWriter getErrorWriter() { + return ERROR_WRITER; + } + /** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */ public final void println(String msg) { println(Verbosity.NORMAL, msg); @@ -83,14 +96,35 @@ public abstract class Terminal { print(verbosity, msg + lineSeparator); } - /** Prints message to the terminal at {@code verbosity} level, without a newline. */ + /** Prints message to the terminal's standard output at {@code verbosity} level, without a newline. */ public final void print(Verbosity verbosity, String msg) { + print(verbosity, msg, false); + } + + /** Prints message to the terminal at {@code verbosity} level, without a newline. */ + private void print(Verbosity verbosity, String msg, boolean isError) { if (isPrintable(verbosity)) { - getWriter().print(msg); - getWriter().flush(); + PrintWriter writer = isError ? getErrorWriter() : getWriter(); + writer.print(msg); + writer.flush(); } } + /** Prints a message to the terminal's standard error at {@code verbosity} level, without a newline. */ + public final void errorPrint(Verbosity verbosity, String msg) { + print(verbosity, msg, true); + } + + /** Prints a line to the terminal's standard error at {@link Verbosity#NORMAL} verbosity level. */ + public final void errorPrintln(String msg) { + errorPrintln(Verbosity.NORMAL, msg); + } + + /** Prints a line to the terminal's standard error at {@code verbosity} level.
*/ + public final void errorPrintln(Verbosity verbosity, String msg) { + errorPrint(verbosity, msg + lineSeparator); + } + /** Checks if is enough {@code verbosity} level to be printed */ public final boolean isPrintable(Verbosity verbosity) { return this.verbosity.ordinal() >= verbosity.ordinal(); @@ -110,7 +144,7 @@ public abstract class Terminal { answer = answer.toLowerCase(Locale.ROOT); boolean answerYes = answer.equals("y"); if (answerYes == false && answer.equals("n") == false) { - println("Did not understand answer '" + answer + "'"); + errorPrintln("Did not understand answer '" + answer + "'"); continue; } return answerYes; @@ -165,7 +199,7 @@ public abstract class Terminal { @Override public String readText(String text) { - getWriter().print(text); + getErrorWriter().print(text); // prompts should go to standard error to avoid mixing with list output BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset())); try { final String line = reader.readLine(); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 15678560443..9d550ce5f5d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -817,11 +817,6 @@ public final class Walker extends PainlessParserBaseVisitor { @Override public ANode visitRegex(RegexContext ctx) { - if (false == settings.areRegexesEnabled()) { - throw location(ctx).createError(new IllegalStateException("Regexes are disabled. Set [script.painless.regex.enabled] to [true] " - + "in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep " - + "recursion and long loops.")); - } String text = ctx.REGEX().getText(); int lastSlash = text.lastIndexOf('/'); String pattern = text.substring(1, lastSlash); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java index 356c1958c6e..106549c1c13 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ERegex.java @@ -40,6 +40,8 @@ public final class ERegex extends AExpression { private final int flags; private Constant constant; + private CompilerSettings settings; + public ERegex(Location location, String pattern, String flagsString) { super(location); @@ -56,7 +58,7 @@ public final class ERegex extends AExpression { @Override void storeSettings(CompilerSettings settings) { - // do nothing + this.settings = settings; } @Override @@ -66,6 +68,12 @@ public final class ERegex extends AExpression { @Override void analyze(Locals locals) { + if (false == settings.areRegexesEnabled()) { + throw createError(new IllegalStateException("Regexes are disabled. Set [script.painless.regex.enabled] to [true] " + + "in elasticsearch.yaml to allow them. 
Be careful though, regexes break out of Painless's protection against deep " + + "recursion and long loops.")); + } + if (!read) { throw createError(new IllegalArgumentException("Regex constant may only be read [" + pattern + "].")); } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index d1db6606c86..1dac5ae2573 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -262,7 +262,7 @@ public class WhenThingsGoWrongTests extends ScriptTestCase { } public void testRegexDisabledByDefault() { - IllegalStateException e = expectThrows(IllegalStateException.class, () -> exec("return 'foo' ==~ /foo/")); + IllegalStateException e = expectScriptThrows(IllegalStateException.class, () -> exec("return 'foo' ==~ /foo/")); assertEquals("Regexes are disabled. Set [script.painless.regex.enabled] to [true] in elasticsearch.yaml to allow them. " + "Be careful though, regexes break out of Painless's protection against deep recursion and long loops.", e.getMessage()); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 25e44e392c4..ccd319fdc77 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -123,11 +123,14 @@ public class ReindexDocumentationIT extends ESIntegTestCase { .filter(QueryBuilders.termQuery("level", "awesome")) .maxDocs(1000) .script(new Script(ScriptType.INLINE, - "ctx._source.awesome = 'absolutely'", "painless", + "ctx._source.awesome = 'absolutely'", Collections.emptyMap())); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-filter + + // validate order of string params to Script constructor + assertEquals(updateByQuery.request().getScript().getLang(), "painless"); } { // tag::update-by-query-size @@ -157,16 +160,19 @@ public class ReindexDocumentationIT extends ESIntegTestCase { updateByQuery.source("source_index") .script(new Script( ScriptType.INLINE, + "painless", "if (ctx._source.awesome == 'absolutely') {" + " ctx.op='noop'" + "} else if (ctx._source.awesome == 'lame') {" + " ctx.op='delete'" + "} else {" + "ctx._source.awesome = 'absolutely'}", - "painless", Collections.emptyMap())); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-script + + // validate order of string params to Script constructor + assertEquals(updateByQuery.request().getScript().getLang(), "painless"); } { // tag::update-by-query-multi-index diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java index 604dc7c083e..7a74078894c 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; import 
org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import java.io.BufferedInputStream; @@ -97,7 +98,7 @@ public class URLBlobContainer extends AbstractBlobContainer { } @Override - public void delete() { + public DeleteResult delete() { throw new UnsupportedOperationException("URL repository is read only"); } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 769e883205d..6c1579bc283 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -152,7 +152,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) { super(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher); - Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); + Netty4Utils.setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings)); this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index af5e8b66fe7..d3e43e16dd5 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -112,7 +112,7 @@ public class Netty4Transport extends TcpTransport { PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); - Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); + Netty4Utils.setAvailableProcessors(EsExecutors.NODE_PROCESSORS_SETTING.get(settings)); this.workerCount = WORKER_COUNT.get(settings); // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java index 22000cf7979..c7e71fab634 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/index/analysis/KuromojiTokenizerFactory.java @@ -23,17 +23,22 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode; import org.apache.lucene.analysis.ja.dict.UserDictionary; +import org.apache.lucene.analysis.ja.util.CSVUtil; import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import java.io.IOException; -import java.io.Reader; +import java.io.StringReader; +import java.util.HashSet; +import java.util.List; +import java.util.Set; public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { - private static final String USER_DICT_OPTION = "user_dictionary"; + private static final String USER_DICT_PATH_OPTION = "user_dictionary"; + private static final String USER_DICT_RULES_OPTION = "user_dictionary_rules"; private static final String NBEST_COST = "nbest_cost"; private static final String NBEST_EXAMPLES = "nbest_examples"; @@ -54,17 +59,33 @@ public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { } public static UserDictionary getUserDictionary(Environment env, Settings settings) { + if (settings.get(USER_DICT_PATH_OPTION) != null && settings.get(USER_DICT_RULES_OPTION) != null) { + throw new IllegalArgumentException("It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + + " with [" + USER_DICT_RULES_OPTION + "]"); + } try { - final Reader reader = Analysis.getReaderFromFile(env, settings, USER_DICT_OPTION); - if (reader == null) { + List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false); + if (ruleList == null || ruleList.isEmpty()) { return null; - } else { - try { - return UserDictionary.open(reader); - } finally { - reader.close(); - } } + Set dup = new HashSet<>(); + int lineNum = 0; + for (String line : ruleList) { + // ignore comments + if (line.startsWith("#") == false) { + String[] values = CSVUtil.parse(line); + if (dup.add(values[0]) == false) { + throw new IllegalArgumentException("Found duplicate term [" + values[0] + "] in user dictionary " + + "at line [" + lineNum + "]"); + } + } + ++ lineNum; + } + StringBuilder sb = new StringBuilder(); + for (String line : ruleList) { + sb.append(line).append(System.lineSeparator()); + } + return UserDictionary.open(new StringReader(sb.toString())); } catch (IOException e) { throw new ElasticsearchException("failed to load kuromoji user dictionary", e); } diff --git a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java index 29e73d5a9fa..9add830c26c 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/elasticsearch/index/analysis/KuromojiAnalysisTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.analysis; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ja.JapaneseAnalyzer; @@ -39,6 +40,8 @@ import java.io.StringReader; import java.nio.file.Files; import java.nio.file.Path; +import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; @@ -307,4 +310,55 @@ public class KuromojiAnalysisTests extends ESTestCase { tokenizer.setReader(new StringReader(source)); assertSimpleTSOutput(tokenFilter.create(tokenizer), expected); } + + 
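+ // Note on the rule format used in the tests below: each user_dictionary_rules
+ // entry follows Lucene's kuromoji UserDictionary CSV convention,
+ // <text>,<token 1> ... <token n>,<reading 1> ... <reading n>,<part-of-speech>.
+ // For example, "制限スピード,制限スピード,セイゲンスピード,テスト名詞" keeps the compound
+ // as a single token with the given reading and part-of-speech name.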
public void testKuromojiAnalyzerUserDict() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "kuromoji") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++,c++,w,w", "制限スピード,制限スピード,セイゲンスピード,テスト名詞") + .build(); + TestAnalysis analysis = createTestAnalysis(settings); + Analyzer analyzer = analysis.indexAnalyzers.get("my_analyzer"); + try (TokenStream stream = analyzer.tokenStream("", "制限スピード")) { + assertTokenStreamContents(stream, new String[]{"制限スピード"}); + } + + try (TokenStream stream = analyzer.tokenStream("", "c++world")) { + assertTokenStreamContents(stream, new String[]{"c++", "world"}); + } + } + + public void testKuromojiAnalyzerInvalidUserDictOption() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "kuromoji") + .put("index.analysis.analyzer.my_analyzer.user_dictionary", "user_dict.txt") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++,c++,w,w") + .build(); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings)); + assertThat(exc.getMessage(), containsString("It is not allowed to use [user_dictionary] in conjunction " + + "with [user_dictionary_rules]")); + } + + public void testKuromojiAnalyzerDuplicateUserDictRule() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "kuromoji") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", + "c++,c++,w,w", "#comment", "制限スピード,制限スピード,セイゲンスピード,テスト名詞", "制限スピード,制限スピード,セイゲンスピード,テスト名詞") + .build(); + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings)); + assertThat(exc.getMessage(), containsString("[制限スピード] in user dictionary at line [3]")); + } + + private TestAnalysis createTestAnalysis(Settings analysisSettings) throws IOException { + InputStream dict = KuromojiAnalysisTests.class.getResourceAsStream("user_dict.txt"); + Path home = createTempDir(); + Path config = home.resolve("config"); + Files.createDirectory(config); + Files.copy(dict, config.resolve("user_dict.txt")); + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(Environment.PATH_HOME_SETTING.getKey(), home) + .put(analysisSettings) + .build(); + return AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new AnalysisKuromojiPlugin()); + } } diff --git a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_nori/10_basic.yml b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml similarity index 100% rename from plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_nori/10_basic.yml rename to plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/10_basic.yml diff --git a/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_nori/20_search.yml b/plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/20_search.yml similarity index 100% rename from plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_nori/20_search.yml rename to plugins/analysis-kuromoji/src/test/resources/rest-api-spec/test/analysis_kuromoji/20_search.yml diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java 
b/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java index 8830cf7c977..bac5dd2a770 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/index/analysis/NoriTokenizerFactory.java @@ -51,7 +51,7 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory { throw new IllegalArgumentException("It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + " with [" + USER_DICT_RULES_OPTION + "]"); } - List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION); + List ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, true); StringBuilder sb = new StringBuilder(); if (ruleList == null || ruleList.isEmpty()) { return null; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 23b1fe59a64..37963648a74 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.threadpool.ThreadPool; @@ -126,9 +127,9 @@ public class AzureBlobContainer extends AbstractBlobContainer { } @Override - public void delete() throws IOException { + public DeleteResult delete() throws IOException { try { - blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME)); + return blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME)); } catch (URISyntaxException | StorageException e) { throw new IOException(e); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index a7d9bb93a51..e4a7e3acb65 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -21,12 +21,12 @@ package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; - import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.repositories.azure.AzureRepository.Repository; import org.elasticsearch.threadpool.ThreadPool; @@ -92,8 +92,9 @@ public class AzureBlobStore implements BlobStore { service.deleteBlob(clientName, container, blob); } - public void deleteBlobDirectory(String path, Executor executor) throws 
URISyntaxException, StorageException, IOException { - service.deleteBlobDirectory(clientName, container, path, executor); + public DeleteResult deleteBlobDirectory(String path, Executor executor) + throws URISyntaxException, StorageException, IOException { + return service.deleteBlobDirectory(clientName, container, path, executor); } public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index be98edda83d..ef34c533501 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -43,6 +43,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; @@ -73,7 +74,7 @@ import java.util.function.Supplier; import static java.util.Collections.emptyMap; public class AzureStorageService { - + private static final Logger logger = LogManager.getLogger(AzureStorageService.class); public static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); @@ -193,13 +194,15 @@ public class AzureStorageService { }); } - void deleteBlobDirectory(String account, String container, String path, Executor executor) + DeleteResult deleteBlobDirectory(String account, String container, String path, Executor executor) throws URISyntaxException, StorageException, IOException { final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account); final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); final Collection<Exception> exceptions = Collections.synchronizedList(new ArrayList<>()); final AtomicLong outstanding = new AtomicLong(1L); final PlainActionFuture<Void> result = PlainActionFuture.newFuture(); + final AtomicLong blobsDeleted = new AtomicLong(); + final AtomicLong bytesDeleted = new AtomicLong(); SocketAccess.doPrivilegedVoidException(() -> { for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true)) { // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ @@ -209,7 +212,17 @@ executor.execute(new AbstractRunnable() { @Override protected void doRun() throws Exception { + final long len; + if (blobItem instanceof CloudBlob) { + len = ((CloudBlob) blobItem).getProperties().getLength(); + } else { + len = -1L; + } deleteBlob(account, container, blobPath); + blobsDeleted.incrementAndGet(); + if (len >= 0) { + bytesDeleted.addAndGet(len); + } } @Override @@ -235,6 +248,7 @@ exceptions.forEach(ex::addSuppressed); throw ex; } + return new DeleteResult(blobsDeleted.get(), bytesDeleted.get()); } public InputStream getInputStream(String account, String container, String blob) diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java
b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index 4657ece3c8a..da227502427 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -22,6 +22,7 @@ package org.elasticsearch.repositories.gcs; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import java.io.IOException; @@ -77,8 +78,8 @@ class GoogleCloudStorageBlobContainer extends AbstractBlobContainer { } @Override - public void delete() throws IOException { - blobStore.deleteDirectory(path().buildAsString()); + public DeleteResult delete() throws IOException { + return blobStore.deleteDirectory(path().buildAsString()); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 2ff4dc6d977..5586be349bc 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.core.internal.io.Streams; @@ -55,6 +56,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -300,15 +302,24 @@ class GoogleCloudStorageBlobStore implements BlobStore { * * @param pathStr Name of path to delete */ - void deleteDirectory(String pathStr) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> { + DeleteResult deleteDirectory(String pathStr) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> { + DeleteResult deleteResult = DeleteResult.ZERO; Page<Blob> page = client().get(bucketName).list(BlobListOption.prefix(pathStr)); do { final Collection<String> blobsToDelete = new ArrayList<>(); - page.getValues().forEach(b -> blobsToDelete.add(b.getName())); + final AtomicLong blobsDeleted = new AtomicLong(0L); + final AtomicLong bytesDeleted = new AtomicLong(0L); + page.getValues().forEach(b -> { + blobsToDelete.add(b.getName()); + blobsDeleted.incrementAndGet(); + bytesDeleted.addAndGet(b.getSize()); + }); deleteBlobsIgnoringIfNotExists(blobsToDelete); + deleteResult = deleteResult.add(blobsDeleted.get(), bytesDeleted.get()); page = page.getNextPage(); } while (page != null); + return deleteResult; }); } diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index e4c9af4d6c7..304906464dc 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; @@ -69,9 +70,13 @@ final class HdfsBlobContainer extends AbstractBlobContainer { } } + // TODO: See if we can get precise result reporting. + private static final DeleteResult DELETE_RESULT = new DeleteResult(1L, 0L); + @Override - public void delete() throws IOException { + public DeleteResult delete() throws IOException { store.execute(fileContext -> fileContext.delete(path, true)); + return DELETE_RESULT; } @Override diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java index e34f290a8e2..d65db92f067 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsRepositoryTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.hdfs; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.MockSecureSettings; @@ -30,6 +31,7 @@ import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; import java.util.Collection; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; @ThreadLeakFilters(filters = HdfsClientThreadLeakFilter.class) public class HdfsRepositoryTests extends AbstractThirdPartyRepositoryTestCase { @@ -58,4 +60,14 @@ public class HdfsRepositoryTests extends AbstractThirdPartyRepositoryTestCase { ).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); } + + // HDFS repository doesn't have precise cleanup stats so we only check whether or not any blobs were removed + @Override + protected void assertCleanupResponse(CleanupRepositoryResponse response, long bytes, long blobs) { + if (blobs > 0) { + assertThat(response.result().blobs(), greaterThan(0L)); + } else { + assertThat(response.result().blobs(), equalTo(0L)); + } + } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 9e9cef9cd0e..46910d840cd 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -32,7 +32,6 @@ import com.amazonaws.services.s3.model.ObjectMetadata; import 
com.amazonaws.services.s3.model.PartETag; import com.amazonaws.services.s3.model.PutObjectRequest; import com.amazonaws.services.s3.model.S3Object; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; import org.apache.lucene.util.SetOnce; @@ -42,6 +41,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.Tuple; @@ -54,6 +54,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.stream.Collectors; @@ -121,7 +122,9 @@ class S3BlobContainer extends AbstractBlobContainer { } @Override - public void delete() throws IOException { + public DeleteResult delete() throws IOException { + final AtomicLong deletedBlobs = new AtomicLong(); + final AtomicLong deletedBytes = new AtomicLong(); try (AmazonS3Reference clientReference = blobStore.clientReference()) { ObjectListing prevListing = null; while (true) { @@ -135,8 +138,12 @@ listObjectsRequest.setPrefix(keyPath); list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(listObjectsRequest)); } - final List<String> blobsToDelete = - list.getObjectSummaries().stream().map(S3ObjectSummary::getKey).collect(Collectors.toList()); + final List<String> blobsToDelete = new ArrayList<>(); + list.getObjectSummaries().forEach(s3ObjectSummary -> { + deletedBlobs.incrementAndGet(); + deletedBytes.addAndGet(s3ObjectSummary.getSize()); + blobsToDelete.add(s3ObjectSummary.getKey()); + }); if (list.isTruncated()) { doDeleteBlobs(blobsToDelete, false); prevListing = list; @@ -150,6 +157,7 @@ } catch (final AmazonClientException e) { throw new IOException("Exception when deleting blob container [" + keyPath + "]", e); } + return new DeleteResult(deletedBlobs.get(), deletedBytes.get()); } @Override diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java new file mode 100644 index 00000000000..ab88cc93682 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -0,0 +1,385 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.SdkClientException; +import com.amazonaws.services.s3.internal.MD5DigestCalculatingInputStream; +import com.amazonaws.util.Base16; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.HttpStatus; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; +import org.elasticsearch.common.lucene.store.InputStreamIndexInput; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.InputStream; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketTimeoutException; +import java.nio.charset.StandardCharsets; +import java.util.Locale; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import static org.elasticsearch.repositories.s3.S3ClientSettings.DISABLE_CHUNKED_ENCODING; +import static org.elasticsearch.repositories.s3.S3ClientSettings.ENDPOINT_SETTING; +import static org.elasticsearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SETTING; +import static org.elasticsearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +/** + * This class tests how a {@link S3BlobContainer} and its underlying AWS S3 client are retrying requests when reading or writing blobs. 
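+ * <p>
+ * A rough sketch of the fixture pattern the tests below rely on (illustrative only, not part of this change): an
+ * HTTP handler answers with a retryable status until a countdown is exhausted, so only the final attempt succeeds.
+ * <pre>{@code
+ * final CountDown countDown = new CountDown(maxRetries + 1);
+ * httpServer.createContext("/bucket/blob", exchange -> {
+ *     if (countDown.countDown()) {
+ *         exchange.sendResponseHeaders(200, bytes.length); // last attempt: serve the real payload
+ *         exchange.getResponseBody().write(bytes);
+ *     } else {
+ *         exchange.sendResponseHeaders(503, -1);           // earlier attempts: retryable error
+ *     }
+ *     exchange.close();
+ * });
+ * }</pre>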
+ */ +@SuppressForbidden(reason = "use a http server") +public class S3BlobContainerRetriesTests extends ESTestCase { + + private HttpServer httpServer; + private S3Service service; + + @Before + public void setUp() throws Exception { + service = new S3Service(); + httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + httpServer.start(); + super.setUp(); + } + + @After + public void tearDown() throws Exception { + IOUtils.close(service); + httpServer.stop(0); + super.tearDown(); + } + + private BlobContainer createBlobContainer(final @Nullable Integer maxRetries, + final @Nullable TimeValue readTimeout, + final @Nullable Boolean disableChunkedEncoding, + final @Nullable ByteSizeValue bufferSize) { + final Settings.Builder clientSettings = Settings.builder(); + final String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + + final String endpoint; + if (httpServer.getAddress().getAddress() instanceof Inet6Address) { + endpoint = "http://[" + httpServer.getAddress().getHostString() + "]:" + httpServer.getAddress().getPort(); + } else { + endpoint = "http://" + httpServer.getAddress().getHostString() + ":" + httpServer.getAddress().getPort(); + } + clientSettings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); + if (maxRetries != null) { + clientSettings.put(MAX_RETRIES_SETTING.getConcreteSettingForNamespace(clientName).getKey(), maxRetries); + } + if (readTimeout != null) { + clientSettings.put(READ_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), readTimeout); + } + if (disableChunkedEncoding != null) { + clientSettings.put(DISABLE_CHUNKED_ENCODING.getConcreteSettingForNamespace(clientName).getKey(), disableChunkedEncoding); + } + + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString(S3ClientSettings.ACCESS_KEY_SETTING.getConcreteSettingForNamespace(clientName).getKey(), "access"); + secureSettings.setString(S3ClientSettings.SECRET_KEY_SETTING.getConcreteSettingForNamespace(clientName).getKey(), "secret"); + clientSettings.setSecureSettings(secureSettings); + service.refreshAndClearCache(S3ClientSettings.load(clientSettings.build())); + + final RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repository", S3Repository.TYPE, + Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName).build()); + + return new S3BlobContainer(BlobPath.cleanPath(), new S3BlobStore(service, "bucket", + S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getDefault(Settings.EMPTY), + bufferSize == null ? 
S3Repository.BUFFER_SIZE_SETTING.getDefault(Settings.EMPTY) : bufferSize, + S3Repository.CANNED_ACL_SETTING.getDefault(Settings.EMPTY), + S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), + repositoryMetaData)); + } + + public void testReadBlobWithRetries() throws Exception { + final int maxRetries = randomInt(5); + final CountDown countDown = new CountDown(maxRetries + 1); + + final byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 512)); + httpServer.createContext("/bucket/read_blob_max_retries", exchange -> { + Streams.readFully(exchange.getRequestBody()); + if (countDown.countDown()) { + exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length); + exchange.getResponseBody().write(bytes); + exchange.close(); + return; + } + exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY, + HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1); + exchange.close(); + }); + + final BlobContainer blobContainer = createBlobContainer(maxRetries, null, null, null); + try (InputStream inputStream = blobContainer.readBlob("read_blob_max_retries")) { + assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); + assertThat(countDown.isCountedDown(), is(true)); + } + } + + public void testReadBlobWithReadTimeouts() { + final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500)); + final BlobContainer blobContainer = createBlobContainer(1, readTimeout, null, null); + + // HTTP server does not send a response + httpServer.createContext("/bucket/read_blob_unresponsive", exchange -> {}); + + Exception exception = expectThrows(SdkClientException.class, () -> blobContainer.readBlob("read_blob_unresponsive")); + assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); + assertThat(exception.getCause(), instanceOf(SocketTimeoutException.class)); + + // HTTP server sends a partial response + final byte[] bytes = randomByteArrayOfLength(randomIntBetween(10, 128)); + httpServer.createContext("/bucket/read_blob_incomplete", exchange -> { + exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length); + exchange.getResponseBody().write(bytes, 0, randomIntBetween(1, bytes.length - 1)); + if (randomBoolean()) { + exchange.getResponseBody().flush(); + } + }); + + exception = expectThrows(SocketTimeoutException.class, () -> { + try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) { + Streams.readFully(stream); + } + }); + assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); + } + + public void testWriteBlobWithRetries() throws Exception { + final int maxRetries = randomInt(5); + final CountDown countDown = new CountDown(maxRetries + 1); + + final byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 512)); + httpServer.createContext("/bucket/write_blob_max_retries", exchange -> { + final BytesReference body = Streams.readFully(exchange.getRequestBody()); + if (countDown.countDown()) { + if (Objects.deepEquals(bytes, BytesReference.toBytes(body))) { + exchange.sendResponseHeaders(HttpStatus.SC_OK, -1); + } else { + exchange.sendResponseHeaders(HttpStatus.SC_BAD_REQUEST, -1); + } + exchange.close(); + return; + } + exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY, + 
HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1); + exchange.close(); + }); + + final BlobContainer blobContainer = createBlobContainer(maxRetries, null, true, null); + try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { + blobContainer.writeBlob("write_blob_max_retries", stream, bytes.length, false); + } + assertThat(countDown.isCountedDown(), is(true)); + } + + public void testWriteBlobWithReadTimeouts() { + final TimeValue readTimeout = TimeValue.timeValueMillis(randomIntBetween(100, 500)); + final BlobContainer blobContainer = createBlobContainer(1, readTimeout, true, null); + + // HTTP server does not send a response + httpServer.createContext("/bucket/write_blob_timeout", exchange -> { + if (randomBoolean()) { + Streams.readFully(exchange.getRequestBody()); + } + }); + + final byte[] bytes = randomByteArrayOfLength(randomIntBetween(1, 128)); + Exception exception = expectThrows(IOException.class, () -> { + try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", bytes), bytes.length)) { + blobContainer.writeBlob("write_blob_timeout", stream, bytes.length, false); + } + }); + assertThat(exception.getMessage().toLowerCase(Locale.ROOT), + containsString("unable to upload object [write_blob_timeout] using a single upload")); + + assertThat(exception.getCause(), instanceOf(SdkClientException.class)); + assertThat(exception.getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); + + assertThat(exception.getCause().getCause(), instanceOf(SocketTimeoutException.class)); + assertThat(exception.getCause().getCause().getMessage().toLowerCase(Locale.ROOT), containsString("read timed out")); + } + + public void testWriteLargeBlob() throws Exception { + final boolean useTimeout = rarely(); + final TimeValue readTimeout = useTimeout ? 
TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null; + final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB); + final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize); + + final int parts = randomIntBetween(1, 2); + final long lastPartSize = randomLongBetween(10, 512); + final long blobSize = (parts * bufferSize.getBytes()) + lastPartSize; + + final int maxRetries = 2; // we want all requests to fail at least once + final CountDown countDownInitiate = new CountDown(maxRetries); + final AtomicInteger countDownUploads = new AtomicInteger(maxRetries * (parts + 1)); + final CountDown countDownComplete = new CountDown(maxRetries); + + httpServer.createContext("/bucket/write_large_blob", exchange -> { + if ("POST".equals(exchange.getRequestMethod()) + && exchange.getRequestURI().getQuery().equals("uploads")) { + // initiate multipart upload request + if (countDownInitiate.countDown()) { + byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<InitiateMultipartUploadResult>\n" + + "  <Bucket>bucket</Bucket>\n" + + "  <Key>write_large_blob</Key>\n" + + "  <UploadId>TEST</UploadId>\n" + + "</InitiateMultipartUploadResult>").getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length); + exchange.getResponseBody().write(response); + exchange.close(); + return; + } + } else if ("PUT".equals(exchange.getRequestMethod())) { + // upload part request + MD5DigestCalculatingInputStream md5 = new MD5DigestCalculatingInputStream(exchange.getRequestBody()); + BytesReference bytes = Streams.readFully(md5); + assertThat((long) bytes.length(), anyOf(equalTo(lastPartSize), equalTo(bufferSize.getBytes()))); + + if (countDownUploads.decrementAndGet() % 2 == 0) { + exchange.getResponseHeaders().add("ETag", Base16.encodeAsString(md5.getMd5Digest())); + exchange.sendResponseHeaders(HttpStatus.SC_OK, -1); + exchange.close(); + return; + } + + } else if ("POST".equals(exchange.getRequestMethod()) + && exchange.getRequestURI().getQuery().equals("uploadId=TEST")) { + // complete multipart upload request + Streams.readFully(exchange.getRequestBody()); + if (countDownComplete.countDown()) { + byte[] response = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + + "<CompleteMultipartUploadResult>\n" + + "  <Bucket>bucket</Bucket>\n" + + "  <Key>write_large_blob</Key>\n" + + "</CompleteMultipartUploadResult>").getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, response.length); + exchange.getResponseBody().write(response); + exchange.close(); + return; + } + } + + // sends an error back or let the request time out + if (useTimeout == false) { + exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY, + HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1); + exchange.close(); + } + }); + + blobContainer.writeBlob("write_large_blob", new ZeroInputStream(blobSize), blobSize, false); + + assertThat(countDownInitiate.isCountedDown(), is(true)); + assertThat(countDownUploads.get(), equalTo(0)); + assertThat(countDownComplete.isCountedDown(), is(true)); + } + + /** + * A resettable InputStream that only serves zeros. + * + * Ideally it should be wrapped into a BufferedInputStream but it seems that the AWS SDK is calling InputStream{@link #reset()} + * before calling InputStream{@link #mark(int)}, which is not permitted by the {@link #reset()} method contract.
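+ * <p>
+ * A small illustration of that quirk (hypothetical call order, not part of this change): the lenient reset() below
+ * simply rewinds to the recorded mark, which is -1 until mark(int) is ever called, instead of throwing the way a
+ * BufferedInputStream would.
+ * <pre>{@code
+ * InputStream in = new ZeroInputStream(4);
+ * in.read();   // serves a zero byte
+ * in.reset();  // no mark(int) was set: BufferedInputStream would throw "Resetting to invalid mark" here
+ * in.read();   // now reports end-of-stream, but no exception was raised
+ * }</pre>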
+ **/ + private static class ZeroInputStream extends InputStream { + + private final AtomicBoolean closed = new AtomicBoolean(false); + private final long length; + private final AtomicLong reads; + private volatile long mark; + + private ZeroInputStream(final long length) { + this.length = length; + this.reads = new AtomicLong(length); + this.mark = -1; + } + + @Override + public int read() throws IOException { + ensureOpen(); + if (reads.decrementAndGet() < 0) { + return -1; + } + return 0; + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public synchronized void mark(int readlimit) { + mark = reads.get(); + } + + @Override + public synchronized void reset() throws IOException { + ensureOpen(); + reads.set(mark); + } + + @Override + public int available() throws IOException { + ensureOpen(); + return Math.toIntExact(length - reads.get()); + } + + @Override + public void close() throws IOException { + closed.set(true); + } + + private void ensureOpen() throws IOException { + if (closed.get()) { + throw new IOException("Stream closed"); + } + } + } +} diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java index e670a4364fe..9e1d6e5710e 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilElasticsearchCliTests.java @@ -40,7 +40,7 @@ public class EvilElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.OK, true, - output -> {}, + (output, error) -> {}, (foreground, pidFile, quiet, esSettings) -> { Settings settings = esSettings.settings(); assertThat(settings.keySet(), hasSize(2)); @@ -55,7 +55,7 @@ public class EvilElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.OK, true, - output -> {}, + (output, error) -> {}, (foreground, pidFile, quiet, esSettings) -> { Settings settings = esSettings.settings(); assertThat(settings.keySet(), hasSize(2)); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilCommandTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilCommandTests.java index 2990101134f..824dd90ec22 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilCommandTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/cli/EvilCommandTests.java @@ -55,7 +55,7 @@ public class EvilCommandTests extends ESTestCase { command.getShutdownHookThread().run(); command.getShutdownHookThread().join(); assertTrue(closed.get()); - final String output = terminal.getOutput(); + final String output = terminal.getErrorOutput(); if (shouldThrow) { // ensure that we dump the exception assertThat(output, containsString("java.io.IOException: fail")); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java index d06efb37a3d..7b531ba0e19 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/ArchiveTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.packaging.test; import org.apache.http.client.fluent.Request; import org.elasticsearch.packaging.util.Archives; -import org.elasticsearch.packaging.util.Distribution; import org.elasticsearch.packaging.util.FileUtils; import org.elasticsearch.packaging.util.Installation; import 
org.elasticsearch.packaging.util.Platforms; @@ -52,7 +51,6 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.isEmptyString; import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; @@ -60,9 +58,8 @@ import static org.junit.Assume.assumeTrue; public class ArchiveTests extends PackagingTestCase { @BeforeClass - public static void assumptions() { - assumeTrue("only archive distributions", - distribution().packaging == Distribution.Packaging.TAR || distribution().packaging == Distribution.Packaging.ZIP); + public static void filterDistros() { + assumeTrue("only archives", distribution.isArchive()); } public void test10Install() throws Exception { @@ -71,20 +68,14 @@ public class ArchiveTests extends PackagingTestCase { } public void test20PluginsListWithNoPlugins() throws Exception { - assumeThat(installation, is(notNullValue())); - final Installation.Executables bin = installation.executables(); - final Shell sh = newShell(); final Result r = sh.run(bin.elasticsearchPlugin + " list"); assertThat(r.stdout, isEmptyString()); } public void test30NoJava() throws Exception { - assumeThat(installation, is(notNullValue())); - final Installation.Executables bin = installation.executables(); - final Shell sh = newShell(); sh.getEnv().remove("JAVA_HOME"); final Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); @@ -105,10 +96,7 @@ public class ArchiveTests extends PackagingTestCase { } public void test40CreateKeystoreManually() throws Exception { - assumeThat(installation, is(notNullValue())); - final Installation.Executables bin = installation.executables(); - final Shell sh = newShell(); Platforms.onLinux(() -> sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " create")); @@ -138,12 +126,10 @@ public class ArchiveTests extends PackagingTestCase { } public void test50StartAndStop() throws Exception { - assumeThat(installation, is(notNullValue())); - // cleanup from previous test rm(installation.config("elasticsearch.keystore")); - Archives.runElasticsearch(installation, newShell()); + Archives.runElasticsearch(installation, sh); final String gcLogName = Platforms.LINUX && distribution().hasJdk == false ? 
"gc.log.0.current" @@ -156,8 +142,6 @@ public class ArchiveTests extends PackagingTestCase { } public void assertRunsWithJavaHome() throws Exception { - Shell sh = newShell(); - Platforms.onLinux(() -> { String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); sh.getEnv().put("JAVA_HOME", systemJavaHome); @@ -177,13 +161,10 @@ public class ArchiveTests extends PackagingTestCase { } public void test51JavaHomeOverride() throws Exception { - assumeThat(installation, is(notNullValue())); - assertRunsWithJavaHome(); } public void test52BundledJdkRemoved() throws Exception { - assumeThat(installation, is(notNullValue())); assumeThat(distribution().hasJdk, is(true)); Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); @@ -196,8 +177,6 @@ public class ArchiveTests extends PackagingTestCase { } public void test53JavaHomeWithSpecialCharacters() throws Exception { - assumeThat(installation, is(notNullValue())); - Platforms.onWindows(() -> { final Shell sh = new Shell(); try { @@ -251,13 +230,9 @@ public class ArchiveTests extends PackagingTestCase { } public void test60AutoCreateKeystore() throws Exception { - assumeThat(installation, is(notNullValue())); - assertThat(installation.config("elasticsearch.keystore"), file(File, ARCHIVE_OWNER, ARCHIVE_OWNER, p660)); final Installation.Executables bin = installation.executables(); - final Shell sh = newShell(); - Platforms.onLinux(() -> { final Result result = sh.run("sudo -u " + ARCHIVE_OWNER + " " + bin.elasticsearchKeystore + " list"); assertThat(result.stdout, containsString("keystore.seed")); @@ -270,7 +245,6 @@ public class ArchiveTests extends PackagingTestCase { } public void test70CustomPathConfAndJvmOptions() throws Exception { - assumeThat(installation, is(notNullValue())); final Path tempConf = getTempDir().resolve("esconf-alternate"); @@ -288,7 +262,6 @@ public class ArchiveTests extends PackagingTestCase { "-Dlog4j2.disable.jmx=true\n"; append(tempConf.resolve("jvm.options"), jvmOptions); - final Shell sh = newShell(); Platforms.onLinux(() -> sh.run("chown -R elasticsearch:elasticsearch " + tempConf)); Platforms.onWindows(() -> sh.run( "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " + @@ -301,11 +274,10 @@ public class ArchiveTests extends PackagingTestCase { "}" )); - final Shell serverShell = newShell(); - serverShell.getEnv().put("ES_PATH_CONF", tempConf.toString()); - serverShell.getEnv().put("ES_JAVA_OPTS", "-XX:-UseCompressedOops"); + sh.getEnv().put("ES_PATH_CONF", tempConf.toString()); + sh.getEnv().put("ES_JAVA_OPTS", "-XX:-UseCompressedOops"); - Archives.runElasticsearch(installation, serverShell); + Archives.runElasticsearch(installation, sh); final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes")); assertThat(nodesResponse, containsString("\"heap_init_in_bytes\":536870912")); @@ -319,7 +291,6 @@ public class ArchiveTests extends PackagingTestCase { } public void test80RelativePathConf() throws Exception { - assumeThat(installation, is(notNullValue())); final Path temp = getTempDir().resolve("esconf-alternate"); final Path tempConf = temp.resolve("config"); @@ -334,7 +305,6 @@ public class ArchiveTests extends PackagingTestCase { append(tempConf.resolve("elasticsearch.yml"), "node.name: relative"); - final Shell sh = newShell(); Platforms.onLinux(() -> sh.run("chown -R elasticsearch:elasticsearch " + temp)); Platforms.onWindows(() -> sh.run( "$account = New-Object System.Security.Principal.NTAccount 'vagrant'; " + @@ -347,10 +317,9 
@@ public class ArchiveTests extends PackagingTestCase { "}" )); - final Shell serverShell = newShell(); - serverShell.setWorkingDirectory(temp); - serverShell.getEnv().put("ES_PATH_CONF", "config"); - Archives.runElasticsearch(installation, serverShell); + sh.setWorkingDirectory(temp); + sh.getEnv().put("ES_PATH_CONF", "config"); + Archives.runElasticsearch(installation, sh); final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes")); assertThat(nodesResponse, containsString("\"name\":\"relative\"")); @@ -363,10 +332,7 @@ public class ArchiveTests extends PackagingTestCase { } public void test90SecurityCliPackaging() throws Exception { - assumeThat(installation, is(notNullValue())); - final Installation.Executables bin = installation.executables(); - final Shell sh = newShell(); if (distribution().isDefault()) { assertTrue(Files.exists(installation.lib.resolve("tools").resolve("security-cli"))); @@ -377,7 +343,7 @@ public class ArchiveTests extends PackagingTestCase { // Ensure that the exit code from the java command is passed back up through the shell script result = sh.runIgnoreExitCode(bin.elasticsearchCertutil + " invalid-command"); assertThat(result.exitCode, is(not(0))); - assertThat(result.stdout, containsString("Unknown command [invalid-command]")); + assertThat(result.stderr, containsString("Unknown command [invalid-command]")); }; Platforms.onLinux(action); Platforms.onWindows(action); @@ -387,10 +353,7 @@ public class ArchiveTests extends PackagingTestCase { } public void test91ElasticsearchShardCliPackaging() throws Exception { - assumeThat(installation, is(notNullValue())); - final Installation.Executables bin = installation.executables(); - final Shell sh = newShell(); Platforms.PlatformAction action = () -> { final Result result = sh.run(bin.elasticsearchShard + " -h"); @@ -405,10 +368,7 @@ public class ArchiveTests extends PackagingTestCase { } public void test92ElasticsearchNodeCliPackaging() throws Exception { - assumeThat(installation, is(notNullValue())); - final Installation.Executables bin = installation.executables(); - final Shell sh = newShell(); Platforms.PlatformAction action = () -> { final Result result = sh.run(bin.elasticsearchNode + " -h"); @@ -424,12 +384,9 @@ public class ArchiveTests extends PackagingTestCase { } public void test93ElasticsearchNodeCustomDataPathAndNotEsHomeWorkDir() throws Exception { - assumeThat(installation, is(notNullValue())); - Path relativeDataPath = installation.data.relativize(installation.home); append(installation.config("elasticsearch.yml"), "path.data: " + relativeDataPath); - final Shell sh = newShell(); sh.setWorkingDirectory(getTempDir()); Archives.runElasticsearch(installation, sh); @@ -440,10 +397,7 @@ public class ArchiveTests extends PackagingTestCase { } public void test94ElasticsearchNodeExecuteCliNotEsHomeWorkDir() throws Exception { - assumeThat(installation, is(notNullValue())); - final Installation.Executables bin = installation.executables(); - final Shell sh = newShell(); // Run the cli tools from the tmp dir sh.setWorkingDirectory(getTempDir()); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java new file mode 100644 index 00000000000..0a291a9c40d --- /dev/null +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebMetadataTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.packaging.test; + +import junit.framework.TestCase; +import org.elasticsearch.packaging.util.Distribution; +import org.elasticsearch.packaging.util.FileUtils; +import org.elasticsearch.packaging.util.Shell; +import org.junit.Before; + +import java.util.regex.Pattern; + +import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile; +import static org.junit.Assume.assumeTrue; + +public class DebMetadataTests extends PackagingTestCase { + + @Before + public void filterDistros() { + assumeTrue("only deb", distribution.packaging == Distribution.Packaging.DEB); + } + + public void test05CheckLintian() { + sh.run("lintian --fail-on-warnings " + FileUtils.getDistributionFile(distribution())); + } + + public void test06Dependencies() { + + final Shell sh = new Shell(); + + final Shell.Result result = sh.run("dpkg -I " + getDistributionFile(distribution())); + + TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(result.stdout).find()); + + String oppositePackageName = "elasticsearch"; + if (distribution().isDefault()) { + oppositePackageName += "-oss"; + } + + TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: " + oppositePackageName + "$").matcher(result.stdout).find()); + } +} diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java index dc87d685d3f..ea4f5565a98 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/DebPreservationTests.java @@ -19,10 +19,8 @@ package org.elasticsearch.packaging.test; -import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import org.elasticsearch.packaging.util.Distribution; -import org.elasticsearch.packaging.util.Shell; -import org.junit.Before; +import org.junit.BeforeClass; import java.nio.file.Files; import java.nio.file.Paths; @@ -32,37 +30,29 @@ import static org.elasticsearch.packaging.util.FileUtils.assertPathsExist; import static org.elasticsearch.packaging.util.Packages.SYSVINIT_SCRIPT; import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; -import static org.elasticsearch.packaging.util.Packages.install; +import static org.elasticsearch.packaging.util.Packages.installPackage; import static org.elasticsearch.packaging.util.Packages.packageStatus; import static org.elasticsearch.packaging.util.Packages.remove; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; -import static org.elasticsearch.packaging.util.Platforms.isDPKG; -import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.core.Is.is; -import static 
org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; -@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) public class DebPreservationTests extends PackagingTestCase { - @Before - public void onlyCompatibleDistributions() { - assumeTrue("only dpkg platforms", isDPKG()); - assumeTrue("deb distributions", distribution().packaging == Distribution.Packaging.DEB); - assumeTrue("only bundled jdk", distribution().hasJdk); - assumeTrue("only compatible distributions", distribution().packaging.compatible); + @BeforeClass + public static void filterDistros() { + assumeTrue("only deb", distribution.packaging == Distribution.Packaging.DEB); + assumeTrue("only bundled jdk", distribution.hasJdk); } public void test10Install() throws Exception { assertRemoved(distribution()); - installation = install(distribution()); + installation = installPackage(distribution()); assertInstalled(distribution()); verifyPackageInstallation(installation, distribution(), newShell()); } public void test20Remove() throws Exception { - assumeThat(installation, is(notNullValue())); - remove(distribution()); // some config files were not removed @@ -106,9 +96,6 @@ public class DebPreservationTests extends PackagingTestCase { } public void test30Purge() throws Exception { - assumeThat(installation, is(notNullValue())); - - final Shell sh = new Shell(); sh.run("dpkg --purge " + distribution().flavor.name); assertRemoved(distribution()); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java index f326fef9d07..a54461a5e3e 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageTests.java @@ -19,15 +19,12 @@ package org.elasticsearch.packaging.test; -import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import com.carrotsearch.randomizedtesting.generators.RandomStrings; import org.apache.http.client.fluent.Request; -import org.elasticsearch.packaging.util.Distribution; import org.elasticsearch.packaging.util.FileUtils; -import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; import org.hamcrest.CoreMatchers; -import org.junit.Before; +import org.junit.BeforeClass; import java.nio.charset.StandardCharsets; import java.nio.file.Files; @@ -50,53 +47,39 @@ import static org.elasticsearch.packaging.util.FileUtils.slurp; import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE; import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; -import static org.elasticsearch.packaging.util.Packages.install; +import static org.elasticsearch.packaging.util.Packages.installPackage; import static org.elasticsearch.packaging.util.Packages.remove; import static org.elasticsearch.packaging.util.Packages.restartElasticsearch; import static org.elasticsearch.packaging.util.Packages.startElasticsearch; import static org.elasticsearch.packaging.util.Packages.stopElasticsearch; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; import static org.elasticsearch.packaging.util.Platforms.getOsRelease; -import static org.elasticsearch.packaging.util.Platforms.isDPKG; import static org.elasticsearch.packaging.util.Platforms.isSystemd; import static org.elasticsearch.packaging.util.ServerUtils.makeRequest; import static 
org.elasticsearch.packaging.util.ServerUtils.runElasticsearchTests; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; -import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.isEmptyString; import static org.hamcrest.core.Is.is; import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; -@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) public class PackageTests extends PackagingTestCase { - private Shell sh; - @Before - public void onlyCompatibleDistributions() throws Exception { - assumeTrue("only compatible distributions", distribution().packaging.compatible); - assumeTrue("rpm or deb", - distribution().packaging == Distribution.Packaging.DEB || distribution().packaging == Distribution.Packaging.RPM); - sh = newShell(); - } - - public void test05CheckLintian() throws Exception { - assumeTrue(isDPKG()); - sh.run("lintian --fail-on-warnings " + FileUtils.getDistributionFile(distribution())); + @BeforeClass + public static void filterDistros() { + assumeTrue("rpm or deb", distribution.isPackage()); } public void test10InstallPackage() throws Exception { assertRemoved(distribution()); - installation = install(distribution()); + installation = installPackage(distribution()); assertInstalled(distribution()); verifyPackageInstallation(installation, distribution(), sh); } public void test20PluginsCommandWhenNoPlugins() throws Exception { - assumeThat(installation, is(notNullValue())); - assertThat(sh.run(installation.bin("elasticsearch-plugin") + " list").stdout, isEmptyString()); } @@ -109,13 +92,10 @@ public class PackageTests extends PackagingTestCase { } public void test31InstallDoesNotStartServer() { - assumeThat(installation, is(notNullValue())); - assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); } public void assertRunsWithJavaHome() throws Exception { - String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); byte[] originalEnvFile = Files.readAllBytes(installation.envFile); try { Files.write(installation.envFile, ("JAVA_HOME=" + systemJavaHome + "\n").getBytes(StandardCharsets.UTF_8), @@ -132,7 +112,6 @@ public class PackageTests extends PackagingTestCase { } public void test32JavaHomeOverride() throws Exception { - assumeThat(installation, is(notNullValue())); // we always run with java home when no bundled jdk is included, so this test would be repetitive assumeThat(distribution().hasJdk, is(true)); @@ -159,7 +138,6 @@ public class PackageTests extends PackagingTestCase { } public void test42BundledJdkRemoved() throws Exception { - assumeThat(installation, is(notNullValue())); assumeThat(distribution().hasJdk, is(true)); Path relocatedJdk = installation.bundledJdk.getParent().resolve("jdk.relocated"); @@ -173,8 +151,6 @@ public class PackageTests extends PackagingTestCase { public void test40StartServer() throws Exception { String start = sh.runIgnoreExitCode("date ").stdout.trim(); - assumeThat(installation, is(notNullValue())); - startElasticsearch(sh); String journalEntries = sh.runIgnoreExitCode("journalctl _SYSTEMD_UNIT=elasticsearch.service " + @@ -190,8 +166,6 @@ public class PackageTests extends PackagingTestCase { } public void test50Remove() throws Exception { - assumeThat(installation, is(notNullValue())); - // add fake bin directory as if a plugin was installed Files.createDirectories(installation.bin.resolve("myplugin")); @@ -243,9 +217,7 @@ 
public class PackageTests extends PackagingTestCase { } public void test60Reinstall() throws Exception { - assumeThat(installation, is(notNullValue())); - - installation = install(distribution()); + installation = installPackage(distribution()); assertInstalled(distribution()); verifyPackageInstallation(installation, distribution(), sh); @@ -255,7 +227,7 @@ public class PackageTests extends PackagingTestCase { public void test70RestartServer() throws Exception { try { - installation = install(distribution()); + installation = installPackage(distribution()); assertInstalled(distribution()); startElasticsearch(sh); @@ -270,7 +242,7 @@ public class PackageTests extends PackagingTestCase { public void test72TestRuntimeDirectory() throws Exception { try { - installation = install(distribution()); + installation = installPackage(distribution()); FileUtils.rm(installation.pidDir); startElasticsearch(sh); assertPathsExist(installation.pidDir); @@ -281,7 +253,7 @@ public class PackageTests extends PackagingTestCase { } public void test73gcLogsExist() throws Exception { - installation = install(distribution()); + installation = installPackage(distribution()); startElasticsearch(sh); // it can be gc.log or gc.log.0.current assertThat(installation.logs, fileWithGlobExist("gc.log*")); @@ -316,7 +288,6 @@ public class PackageTests extends PackagingTestCase { public void test81CustomPathConfAndJvmOptions() throws Exception { assumeTrue(isSystemd()); - assumeThat(installation, is(notNullValue())); assertPathsExist(installation.envFile); stopElasticsearch(sh); @@ -344,18 +315,17 @@ public class PackageTests extends PackagingTestCase { sh.runIgnoreExitCode("chown -R elasticsearch:elasticsearch " + tempConf); - final Shell serverShell = newShell(); cp(installation.envFile, tempConf.resolve("elasticsearch.bk"));//backup append(installation.envFile, "ES_PATH_CONF=" + tempConf + "\n"); append(installation.envFile, "ES_JAVA_OPTS=-XX:-UseCompressedOops"); - startElasticsearch(serverShell); + startElasticsearch(sh); final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes")); assertThat(nodesResponse, CoreMatchers.containsString("\"heap_init_in_bytes\":536870912")); assertThat(nodesResponse, CoreMatchers.containsString("\"using_compressed_ordinary_object_pointers\":\"false\"")); - stopElasticsearch(serverShell); + stopElasticsearch(sh); } finally { rm(installation.envFile); @@ -371,7 +341,7 @@ public class PackageTests extends PackagingTestCase { sh.run("systemctl mask systemd-sysctl.service"); - installation = install(distribution()); + installation = installPackage(distribution()); sh.run("systemctl unmask systemd-sysctl.service"); } finally { @@ -383,7 +353,7 @@ public class PackageTests extends PackagingTestCase { // Limits are changed on systemd platforms only assumeTrue(isSystemd()); - installation = install(distribution()); + installation = installPackage(distribution()); startElasticsearch(sh); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java index d9ecb62f9bc..6d7534c8bb4 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/PackagingTestCase.java @@ -32,50 +32,86 @@ import org.elasticsearch.packaging.util.Shell; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; +import org.junit.ClassRule; import org.junit.Rule; import 
org.junit.rules.TestName; +import org.junit.rules.TestWatcher; +import org.junit.runner.Description; import org.junit.runner.RunWith; import java.nio.file.Paths; import static org.elasticsearch.packaging.util.Cleanup.cleanEverything; +import static org.junit.Assume.assumeFalse; import static org.junit.Assume.assumeTrue; +/** + * Class that all packaging test cases should inherit from + */ @RunWith(RandomizedRunner.class) @TestMethodProviders({ JUnit3MethodProvider.class }) @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) -/** - * Class that all packaging test cases should inherit from. This makes working with the packaging tests more similar to what we're - * familiar with from {@link org.elasticsearch.test.ESTestCase} without having to apply its behavior that's not relevant here - */ public abstract class PackagingTestCase extends Assert { protected final Log logger = LogFactory.getLog(getClass()); - private static Distribution distribution; + // the distribution being tested + protected static final Distribution distribution; static { distribution = new Distribution(Paths.get(System.getProperty("tests.distribution"))); } + // the java installation already installed on the system + protected static final String systemJavaHome; + static { + Shell sh = new Shell(); + if (Platforms.LINUX) { + systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); + } else { + assert Platforms.WINDOWS; + systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); + } + } + + // the current installation of the distribution being tested + protected static Installation installation; + + private static boolean failed; + + @ClassRule + public static final TestWatcher testFailureRule = new TestWatcher() { + @Override + protected void failed(Throwable e, Description description) { + failed = true; + } + }; + + // a shell to run system commands with + protected Shell sh; + @Rule public final TestName testNameRule = new TestName(); - @Before - public void setup() { - assumeTrue("only compatible distributions", distribution().packaging.compatible); - logger.info("[" + testNameRule.getMethodName() + "]: before test"); + @BeforeClass + public static void filterCompatible() { + assumeTrue("only compatible distributions", distribution.packaging.compatible); } - protected static Installation installation; - @BeforeClass public static void cleanup() throws Exception { installation = null; cleanEverything(); } + @Before + public void setup() throws Exception { + assumeFalse(failed); // skip rest of tests once one fails + + sh = newShell(); + } + /** The {@link Distribution} that should be tested in this case */ protected static Distribution distribution() { return distribution; @@ -85,11 +121,9 @@ public abstract class PackagingTestCase extends Assert { Shell sh = new Shell(); if (distribution().hasJdk == false) { Platforms.onLinux(() -> { - String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); sh.getEnv().put("JAVA_HOME", systemJavaHome); }); Platforms.onWindows(() -> { - final String systemJavaHome = sh.run("$Env:SYSTEM_JAVA_HOME").stdout.trim(); sh.getEnv().put("JAVA_HOME", systemJavaHome); }); } diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageConflictTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmMetadataTests.java similarity index 62% rename from qa/os/src/test/java/org/elasticsearch/packaging/test/PackageConflictTests.java rename to qa/os/src/test/java/org/elasticsearch/packaging/test/RpmMetadataTests.java index 
93b1146d839..e4e63c4eadd 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/PackageConflictTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmMetadataTests.java @@ -30,37 +30,14 @@ import java.util.regex.Pattern; import static org.elasticsearch.packaging.util.FileUtils.getDistributionFile; import static org.junit.Assume.assumeTrue; -public class PackageConflictTests extends PackagingTestCase { - - private Shell sh; +public class RpmMetadataTests extends PackagingTestCase { @Before - public void onlyCompatibleDistributions() throws Exception { - assumeTrue("only compatible distributions", distribution().packaging.compatible); - assumeTrue("rpm or deb", - distribution().packaging == Distribution.Packaging.DEB || distribution().packaging == Distribution.Packaging.RPM); - sh = newShell(); + public void filterDistros() { + assumeTrue("only rpm", distribution.packaging == Distribution.Packaging.RPM); } - public void test11DebDependencies() { - // TODO: rewrite this test to not use a real second distro to try and install - assumeTrue(Platforms.isDPKG()); - - final Shell sh = new Shell(); - - final Shell.Result result = sh.run("dpkg -I " + getDistributionFile(distribution())); - - TestCase.assertTrue(Pattern.compile("(?m)^ Depends:.*bash.*").matcher(result.stdout).find()); - - String oppositePackageName = "elasticsearch"; - if (distribution().isDefault()) { - oppositePackageName += "-oss"; - } - - TestCase.assertTrue(Pattern.compile("(?m)^ Conflicts: " + oppositePackageName + "$").matcher(result.stdout).find()); - } - - public void test11RpmDependencies() { + public void test11Dependencies() { // TODO: rewrite this test to not use a real second distro to try and install assumeTrue(Platforms.isRPM()); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java index 79a1f1fe493..0509b1d244b 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/RpmPreservationTests.java @@ -19,10 +19,9 @@ package org.elasticsearch.packaging.test; -import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; import org.elasticsearch.packaging.util.Distribution; import org.elasticsearch.packaging.util.Shell; -import org.junit.Before; +import org.junit.BeforeClass; import java.nio.file.Files; import java.nio.file.Path; @@ -34,37 +33,29 @@ import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE; import static org.elasticsearch.packaging.util.Packages.SYSVINIT_SCRIPT; import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; -import static org.elasticsearch.packaging.util.Packages.install; +import static org.elasticsearch.packaging.util.Packages.installPackage; import static org.elasticsearch.packaging.util.Packages.remove; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; -import static org.elasticsearch.packaging.util.Platforms.isRPM; import static org.elasticsearch.packaging.util.Platforms.isSystemd; -import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.core.Is.is; -import static org.junit.Assume.assumeThat; import static org.junit.Assume.assumeTrue; -@TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) public class RpmPreservationTests extends PackagingTestCase { - @Before - public void 
onlyCompatibleDistributions() { - assumeTrue("only rpm platforms", isRPM()); - assumeTrue("rpm distributions", distribution().packaging == Distribution.Packaging.RPM); + @BeforeClass + public static void filterDistros() { + assumeTrue("only rpm", distribution.packaging == Distribution.Packaging.RPM); assumeTrue("only bundled jdk", distribution().hasJdk); - assumeTrue("only compatible distributions", distribution().packaging.compatible); } public void test10Install() throws Exception { assertRemoved(distribution()); - installation = install(distribution()); + installation = installPackage(distribution()); assertInstalled(distribution()); verifyPackageInstallation(installation, distribution(), newShell()); } public void test20Remove() throws Exception { - assumeThat(installation, is(notNullValue())); - remove(distribution()); // config was removed @@ -80,7 +71,7 @@ public class RpmPreservationTests extends PackagingTestCase { public void test30PreserveConfig() throws Exception { final Shell sh = new Shell(); - installation = install(distribution()); + installation = installPackage(distribution()); assertInstalled(distribution()); verifyPackageInstallation(installation, distribution(), newShell()); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java b/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java index faf1d13fec6..77e83c95228 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/test/WindowsServiceTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.packaging.util.ServerUtils; import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; import org.junit.After; -import org.junit.Before; import org.junit.BeforeClass; import java.io.IOException; @@ -47,13 +46,6 @@ public class WindowsServiceTests extends PackagingTestCase { private static final String DEFAULT_DISPLAY_NAME = "Elasticsearch " + FileUtils.getCurrentVersion() + " (elasticsearch-service-x64)"; private static String serviceScript; - private Shell sh; - - @Before - public void createShell() { - sh = new Shell(); - } - @BeforeClass public static void ensureWindows() { assumeTrue(Platforms.WINDOWS); diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java index 9d78a998365..aa040fb15fc 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Distribution.java @@ -49,6 +49,14 @@ public class Distribution { return flavor.equals(Flavor.OSS); } + public boolean isArchive() { + return packaging == Packaging.TAR || packaging == Packaging.ZIP; + } + + public boolean isPackage() { + return packaging == Packaging.RPM || packaging == Packaging.DEB; + } + public enum Packaging { TAR(".tar.gz", Platforms.LINUX || Platforms.DARWIN), diff --git a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java index 8d456a4cdc0..ca8b2fcb8fa 100644 --- a/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/os/src/test/java/org/elasticsearch/packaging/util/Packages.java @@ -94,7 +94,7 @@ public class Packages { return result; } - public static Installation install(Distribution distribution) throws IOException { + public static Installation installPackage(Distribution 
distribution) throws IOException { Shell sh = new Shell(); String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); if (distribution.hasJdk == false) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json new file mode 100644 index 00000000000..43c1687b8b5 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.cleanup_repository.json @@ -0,0 +1,34 @@ +{ + "snapshot.cleanup_repository": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/modules-snapshots.html", + "stability": "stable", + "url": { + "paths": [ + { + "path": "/_snapshot/{repository}/_cleanup", + "methods": [ + "POST" + ], + "parts": { + "repository": { + "type": "string", + "required" : true, + "description": "A repository name" + } + } + } + ] + }, + "params": { + "master_timeout": { + "type" : "time", + "description" : "Explicit operation timeout for connection to master node" + }, + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + } + }, + "body": {} + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml index bdcee7af1bc..5669206ee87 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -1,9 +1,37 @@ + --- "Help": + - skip: + version: " - 7.3.99" + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + - do: cat.aliases: help: true + - match: + $body: | + /^ alias .+ \n + index .+ \n + filter .+ \n + routing.index .+ \n + routing.search .+ \n + is_write_index .+ \n + $/ + +--- +"Help (pre 7.4.0)": + - skip: + version: "7.4.0 - " + features: node_selector + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + + - do: + node_selector: + version: " - 7.3.99" + cat.aliases: + help: true + - match: $body: | /^ alias .+ \n @@ -26,6 +54,9 @@ --- "Simple alias": + - skip: + version: " - 7.3.99" + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: indices.create: @@ -47,10 +78,45 @@ - \s+ - \s+ - \s+ + - \s+ $/ +--- +"Simple alias (pre 7.4.0)": + - skip: + version: "7.4.0 - " + features: node_selector + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + + - do: + indices.create: + index: test + + - do: + indices.put_alias: + index: test + name: test_alias + + - do: + node_selector: + version: " - 7.3.99" + cat.aliases: {} + + - match: + $body: | + /^ + test_alias \s+ + test \s+ + - \s+ + - \s+ + - \s+ + $/ + --- "Complex alias": + - skip: + version: " - 7.3.99" + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: indices.create: @@ -68,6 +134,7 @@ body: index_routing: ir search_routing: "sr1,sr2" + is_write_index: true filter: term: foo: bar @@ -82,8 +149,50 @@ [*] \s+ ir \s+ sr1,sr2 \s+ + true \s+ $/ +--- +"Complex alias (pre 7.4.0)": + - skip: + version: "7.4.0 - " + features: node_selector + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + + - do: + indices.create: + index: test + body: + mappings: + properties: + foo: + type: text + + - do: + indices.put_alias: + index: test + name: test_alias + body: + index_routing: ir + search_routing: "sr1,sr2" + filter: + term: + foo: bar + - do: + node_selector: + version: " - 
7.3.99" + cat.aliases: {} + + - match: + $body: | + /^ + test_alias \s+ + test \s+ + [*] \s+ + ir \s+ + sr1,sr2 \s+ + $/ + --- "Alias name": @@ -169,6 +278,9 @@ --- "Column headers": + - skip: + version: " - 7.3.99" + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: indices.create: @@ -189,15 +301,53 @@ index \s+ filter \s+ routing.index \s+ - routing.search + routing.search \s+ + is_write_index \n test_1 \s+ test \s+ - \s+ - \s+ - \s+ + - \s+ $/ +--- +"Column headers (pre 7.4.0)": + - skip: + version: "7.4.0 - " + features: node_selector + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + + - do: + indices.create: + index: test + + - do: + indices.put_alias: + index: test + name: test_1 + + - do: + node_selector: + version: " - 7.3.99" + cat.aliases: + v: true + + - match: + $body: | + /^ alias \s+ + index \s+ + filter \s+ + routing.index \s+ + routing.search + \n + test_1 \s+ + test \s+ + - \s+ + - \s+ + - \s+ + $/ --- "Select columns": @@ -232,6 +382,9 @@ --- "Alias against closed index": + - skip: + version: " - 7.3.99" + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" - do: indices.create: @@ -255,8 +408,42 @@ - \s+ - \s+ - \s+ + - \s+ $/ +--- +"Alias against closed index (pre 7.4.0)": + - skip: + version: "7.4.0 - " + features: node_selector + reason: "is_write_index is shown in cat.aliases starting version 7.4.0" + + - do: + indices.create: + index: test_index + body: + aliases: + test_alias: {} + + - do: + indices.close: + index: test_index + + - do: + node_selector: + version: " - 7.3.99" + cat.aliases: {} + + - match: + $body: | + /^ + test_alias \s+ + test_index \s+ + - \s+ + - \s+ + - \s+ + $/ + --- "Alias sorting": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml index 92bbe3f5a36..1444e6153fd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/20_translog.yml @@ -1,14 +1,14 @@ --- -setup: +"Translog retention without soft_deletes": - do: indices.create: - index: test + index: test + body: + settings: + soft_deletes.enabled: false - do: cluster.health: wait_for_no_initializing_shards: true - ---- -"Translog retention": - do: indices.stats: metric: [ translog ] @@ -64,6 +64,53 @@ setup: - lte: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } +--- +"Translog retention with soft_deletes": + - skip: + version: " - 7.3.99" + reason: "start ignoring translog retention policy with soft-deletes enabled in 7.4" + - do: + indices.create: + index: test + body: + settings: + soft_deletes.enabled: true + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: + indices.stats: + metric: [ translog ] + - set: { indices.test.primaries.translog.size_in_bytes: creation_size } + + - do: + index: + index: test + id: 1 + body: { "foo": "bar" } + + - do: + indices.stats: + metric: [ translog ] + - gt: { indices.test.primaries.translog.size_in_bytes: $creation_size } + - match: { indices.test.primaries.translog.operations: 1 } + - match: { indices.test.primaries.translog.uncommitted_operations: 1 } + # call flush twice to sync the global checkpoint after the last operation so that we can have the safe commit + - do: + indices.flush: + index: test + - do: + 
indices.flush: + index: test + - do: + indices.stats: + metric: [ translog ] + # after flushing we have one empty translog file while an empty index before flushing has two empty translog files. + - lt: { indices.test.primaries.translog.size_in_bytes: $creation_size } + - match: { indices.test.primaries.translog.operations: 0 } + - lt: { indices.test.primaries.translog.uncommitted_size_in_bytes: $creation_size } + - match: { indices.test.primaries.translog.uncommitted_operations: 0 } + --- "Translog last modified age stats": - skip: @@ -81,11 +128,20 @@ setup: - gte: { indices.test.primaries.translog.earliest_last_modified_age: 0 } --- -"Translog stats on closed indices": +"Translog stats on closed indices without soft-deletes": - skip: version: " - 7.2.99" reason: "closed indices have translog stats starting version 7.3.0" + - do: + indices.create: + index: test + body: + settings: + soft_deletes.enabled: false + - do: + cluster.health: + wait_for_no_initializing_shards: true - do: index: index: test @@ -123,3 +179,40 @@ setup: forbid_closed_indices: false - match: { indices.test.primaries.translog.operations: 3 } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } + +--- +"Translog stats on closed indices with soft-deletes": + - skip: + version: " - 7.3.99" + reason: "start ignoring translog retention policy with soft-deletes enabled in 7.4" + - do: + indices.create: + index: test + body: + settings: + soft_deletes.enabled: true + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: + index: + index: test + id: 1 + body: { "foo": "bar" } + - do: + indices.stats: + metric: [ translog ] + - match: { indices.test.primaries.translog.operations: 1 } + - match: { indices.test.primaries.translog.uncommitted_operations: 1 } + - do: + indices.close: + index: test + wait_for_active_shards: 1 + - is_true: acknowledged + - do: + indices.stats: + metric: [ translog ] + expand_wildcards: all + forbid_closed_indices: false + - match: { indices.test.primaries.translog.operations: 0 } + - match: { indices.test.primaries.translog.uncommitted_operations: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml index 5b7ac56361c..6168c211383 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/snapshot.create/10_basic.yml @@ -38,6 +38,51 @@ setup: - match: { acknowledged: true } +--- +"Create a snapshot and clean up repository": + - skip: + version: " - 7.99.99" + reason: cleanup introduced in 8.0 + + - do: + snapshot.cleanup_repository: + repository: test_repo_create_1 + + - match: { results.deleted_bytes: 0 } + - match: { results.deleted_blobs: 0 } + + - do: + snapshot.create: + repository: test_repo_create_1 + snapshot: test_snapshot + wait_for_completion: true + + - match: { snapshot.snapshot: test_snapshot } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.successful: 1 } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.cleanup_repository: + repository: test_repo_create_1 + + - match: { results.deleted_bytes: 0 } + - match: { results.deleted_blobs: 0 } + + - do: + snapshot.delete: + repository: test_repo_create_1 + snapshot: test_snapshot + + - match: { acknowledged: true } + + - do: + snapshot.cleanup_repository: + repository: test_repo_create_1 + + - match: { results.deleted_bytes: 0 } + - match: { 
results.deleted_blobs: 0 } + --- "Create a snapshot for missing index": - skip: diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index f6aae79b83d..79089210fa4 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -116,6 +116,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_2_1 = new Version(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version V_7_3_0 = new Version(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version V_7_3_1 = new Version(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); + public static final Version V_7_3_2 = new Version(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); public static final Version V_7_4_0 = new Version(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); public static final Version CURRENT = V_7_4_0; diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 3abfe246b12..f4571e0abc9 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -48,6 +48,8 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction; import org.elasticsearch.action.admin.cluster.node.usage.TransportNodesUsageAction; import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.TransportCleanupRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; @@ -226,6 +228,7 @@ import org.elasticsearch.rest.action.RestFieldCapabilitiesAction; import org.elasticsearch.rest.action.RestMainAction; import org.elasticsearch.rest.action.admin.cluster.RestAddVotingConfigExclusionAction; import org.elasticsearch.rest.action.admin.cluster.RestCancelTasksAction; +import org.elasticsearch.rest.action.admin.cluster.RestCleanupRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.RestClearVotingConfigExclusionsAction; import org.elasticsearch.rest.action.admin.cluster.RestClusterAllocationExplainAction; import org.elasticsearch.rest.action.admin.cluster.RestClusterGetSettingsAction; @@ -455,6 +458,7 @@ public class ActionModule extends AbstractModule { actions.register(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class); actions.register(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class); actions.register(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class); + actions.register(CleanupRepositoryAction.INSTANCE, TransportCleanupRepositoryAction.class); actions.register(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class); actions.register(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class); actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class); @@ -577,6 +581,7 @@ public class ActionModule extends AbstractModule { registerHandler.accept(new 
RestGetRepositoriesAction(restController, settingsFilter)); registerHandler.accept(new RestDeleteRepositoryAction(restController)); registerHandler.accept(new RestVerifyRepositoryAction(restController)); + registerHandler.accept(new RestCleanupRepositoryAction(restController)); registerHandler.accept(new RestGetSnapshotsAction(restController)); registerHandler.accept(new RestCreateSnapshotAction(restController)); registerHandler.accept(new RestRestoreSnapshotAction(restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java new file mode 100644 index 00000000000..af57e6d4f00 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryAction.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.elasticsearch.action.ActionType; + +public final class CleanupRepositoryAction extends ActionType { + + public static final CleanupRepositoryAction INSTANCE = new CleanupRepositoryAction(); + public static final String NAME = "cluster:admin/repository/_cleanup"; + + private CleanupRepositoryAction() { + super(NAME, CleanupRepositoryResponse::new); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java new file mode 100644 index 00000000000..168cdbb4967 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class CleanupRepositoryRequest extends AcknowledgedRequest { + + private String repository; + + public CleanupRepositoryRequest(String repository) { + this.repository = repository; + } + + public CleanupRepositoryRequest(StreamInput in) throws IOException { + repository = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(repository); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (repository == null) { + validationException = addValidationError("repository is null", null); + } + return validationException; + } + + public String name() { + return repository; + } + + public void name(String repository) { + this.repository = repository; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java new file mode 100644 index 00000000000..2f7e6aefdcc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public class CleanupRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder { + + public CleanupRepositoryRequestBuilder(ElasticsearchClient client, ActionType action, + String repository) { + super(client, action, new CleanupRepositoryRequest(repository)); + } + + public CleanupRepositoryRequestBuilder setName(String repository) { + request.name(repository); + return this; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java new file mode 100644 index 00000000000..8516ece9257 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryResponse.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.repositories.RepositoryCleanupResult; + +import java.io.IOException; + +public final class CleanupRepositoryResponse extends ActionResponse implements ToXContentObject { + + private static final ObjectParser PARSER = + new ObjectParser<>(CleanupRepositoryResponse.class.getName(), true, CleanupRepositoryResponse::new); + + static { + PARSER.declareObject((response, cleanupResult) -> response.result = cleanupResult, + RepositoryCleanupResult.PARSER, new ParseField("results")); + } + + private RepositoryCleanupResult result; + + public CleanupRepositoryResponse() { + } + + public CleanupRepositoryResponse(RepositoryCleanupResult result) { + this.result = result; + } + + public CleanupRepositoryResponse(StreamInput in) throws IOException { + result = new RepositoryCleanupResult(in); + } + + public RepositoryCleanupResult result() { + return result; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + result.writeTo(out); + } + + public static CleanupRepositoryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject().field("results"); + result.toXContent(builder, params); + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java new file mode 100644 index 00000000000..42b51604171 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.cleanup; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.RepositoryCleanupInProgress; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryCleanupResult; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; + +/** + * Repository cleanup action for repository implementations based on {@link BlobStoreRepository}. + * + * The steps taken by the repository cleanup operation are as follows: + *

+ * <ol>
+ *     <li>Check that there are no running repository cleanup, snapshot create, or snapshot delete actions
+ *     and add an entry for the repository that is to be cleaned up to {@link RepositoryCleanupInProgress}</li>
+ *     <li>Run cleanup actions on the repository. Note, these are executed exclusively on the master node.
+ *     For the precise operations executed see {@link BlobStoreRepository#cleanup}</li>
+ *     <li>Remove the entry added to {@link RepositoryCleanupInProgress} in the first step.</li>
+ * </ol>
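+ *
+ * A minimal usage sketch through the cluster admin client (the repository name and the logging in the
+ * listener are illustrative only, they are not part of this change):
+ * <pre>
+ * client.admin().cluster().prepareCleanupRepository("my_repo")
+ *     .execute(ActionListener.wrap(
+ *         response -> logger.info("cleanup result [{}]", response.result()),
+ *         e -> logger.warn("repository cleanup failed", e)));
+ * </pre>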
+ *
+ * On master failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in
+ * {@link BlobStoreRepository#cleanup} ensures that the repository state id has not changed between creation of the cluster state entry
+ * and any delete/write operations. TODO: This will not work if we also want to clean up at the shard level as those will involve writes
+ * as well as deletes.
+ */
+public final class TransportCleanupRepositoryAction extends
+    TransportMasterNodeAction<CleanupRepositoryRequest, CleanupRepositoryResponse> {
+
+    private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class);
+
+    private static final Version MIN_VERSION = Version.V_7_4_0;
+
+    private final RepositoriesService repositoriesService;
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.GENERIC;
+    }
+
+    @Inject
+    public TransportCleanupRepositoryAction(TransportService transportService, ClusterService clusterService,
+                                            RepositoriesService repositoriesService, ThreadPool threadPool, ActionFilters actionFilters,
+                                            IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(CleanupRepositoryAction.NAME, transportService, clusterService, threadPool, actionFilters,
+            CleanupRepositoryRequest::new, indexNameExpressionResolver);
+        this.repositoriesService = repositoriesService;
+        // We add a state applier that will remove any dangling repository cleanup actions on master failover.
+        // This is safe to do since cleanups will increment the repository state id before executing any operations to prevent concurrent
+        // operations from corrupting the repository. This is the same safety mechanism used by snapshot deletes.
+        clusterService.addStateApplier(event -> {
+            if (event.localNodeMaster() && event.previousState().nodes().isLocalNodeElectedMaster() == false) {
+                final RepositoryCleanupInProgress repositoryCleanupInProgress = event.state().custom(RepositoryCleanupInProgress.TYPE);
+                if (repositoryCleanupInProgress == null || repositoryCleanupInProgress.cleanupInProgress() == false) {
+                    return;
+                }
+                clusterService.submitStateUpdateTask("clean up repository cleanup task after master failover",
+                    new ClusterStateUpdateTask() {
+                        @Override
+                        public ClusterState execute(ClusterState currentState) {
+                            return removeInProgressCleanup(currentState);
+                        }
+
+                        @Override
+                        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                            logger.debug("Removed repository cleanup task [{}] from cluster state", repositoryCleanupInProgress);
+                        }
+
+                        @Override
+                        public void onFailure(String source, Exception e) {
+                            logger.warn(
+                                "Failed to remove repository cleanup task [{}] from cluster state", repositoryCleanupInProgress);
+                        }
+                    });
+            }
+        });
+    }
+
+    private static ClusterState removeInProgressCleanup(final ClusterState currentState) {
+        RepositoryCleanupInProgress cleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE);
+        if (cleanupInProgress != null) {
+            boolean changed = false;
+            if (cleanupInProgress.cleanupInProgress()) {
+                cleanupInProgress = new RepositoryCleanupInProgress();
+                changed = true;
+            }
+            if (changed) {
+                return ClusterState.builder(currentState).putCustom(
+                    RepositoryCleanupInProgress.TYPE, cleanupInProgress).build();
+            }
+        }
+        return currentState;
+    }
+
+    @Override
+    protected CleanupRepositoryResponse read(StreamInput in) throws IOException {
+        return new CleanupRepositoryResponse(in);
+    }
+
+    @Override
+    protected void masterOperation(CleanupRepositoryRequest request, ClusterState state,
+                                   ActionListener<CleanupRepositoryResponse> listener) {
+        if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) {
+            cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new));
+        } else {
+            throw new IllegalArgumentException("Repository cleanup is only supported from version [" + MIN_VERSION
+                + "] but the oldest node version in the cluster is [" + state.nodes().getMinNodeVersion() + ']');
+        }
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(CleanupRepositoryRequest request, ClusterState state) {
+        // Cluster is not affected but we look up repositories in metadata
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+    }
+
+    /**
+     * Runs cleanup operations on the given repository.
+     * @param repositoryName Repository to clean up
+     * @param listener Listener for cleanup result
+     */
+    private void cleanupRepo(String repositoryName, ActionListener<RepositoryCleanupResult> listener) {
+        final Repository repository = repositoriesService.repository(repositoryName);
+        if (repository instanceof BlobStoreRepository == false) {
+            listener.onFailure(new IllegalArgumentException("Repository [" + repositoryName + "] does not support repository cleanup"));
+            return;
+        }
+        final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository;
+        final long repositoryStateId = repository.getRepositoryData().getGenId();
+        logger.info("Running cleanup operations on repository [{}][{}]", repositoryName, repositoryStateId);
+        clusterService.submitStateUpdateTask("cleanup repository [" + repositoryName + "][" + repositoryStateId + ']',
+            new ClusterStateUpdateTask() {
+                @Override
+                public ClusterState execute(ClusterState currentState) {
+                    final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE);
+                    if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.cleanupInProgress()) {
+                        throw new IllegalStateException(
+                            "Cannot cleanup [" + repositoryName + "] - a repository cleanup is already in-progress");
+                    }
+                    SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
+                    if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
+                        throw new IllegalStateException("Cannot cleanup [" + repositoryName + "] - a snapshot is currently being deleted");
+                    }
+                    SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
+                    if (snapshots != null && !snapshots.entries().isEmpty()) {
+                        throw new IllegalStateException("Cannot cleanup [" + repositoryName + "] - a snapshot is currently running");
+                    }
+                    return ClusterState.builder(currentState).putCustom(RepositoryCleanupInProgress.TYPE,
+                        new RepositoryCleanupInProgress(
+                            RepositoryCleanupInProgress.startedEntry(repositoryName, repositoryStateId))).build();
+                }
+
+                @Override
+                public void onFailure(String source, Exception e) {
+                    after(e, null);
+                }
+
+                @Override
+                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                    logger.debug("Initialized repository cleanup in cluster state for [{}][{}]", repositoryName, repositoryStateId);
+                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener,
+                        l -> blobStoreRepository.cleanup(
+                            repositoryStateId, ActionListener.wrap(result -> after(null, result), e -> after(e, null)))));
+                }
+
+                private void after(@Nullable Exception failure, @Nullable RepositoryCleanupResult result) {
+                    if (failure == null) {
+                        logger.debug("Finished repository cleanup operations on 
[{}][{}]", repositoryName, repositoryStateId); + } else { + logger.debug(() -> new ParameterizedMessage( + "Failed to finish repository cleanup operations on [{}][{}]", repositoryName, repositoryStateId), failure); + } + assert failure != null || result != null; + clusterService.submitStateUpdateTask( + "remove repository cleanup task [" + repositoryName + "][" + repositoryStateId + ']', + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return removeInProgressCleanup(currentState); + } + + @Override + public void onFailure(String source, Exception e) { + if (failure != null) { + e.addSuppressed(failure); + } + logger.warn(() -> + new ParameterizedMessage("[{}] failed to remove repository cleanup task", repositoryName), e); + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + if (failure == null) { + logger.info("Done with repository cleanup on [{}][{}] with result [{}]", + repositoryName, repositoryStateId, result); + listener.onResponse(result); + } else { + logger.warn(() -> new ParameterizedMessage("Failed to run repository cleanup operations on [{}][{}]", + repositoryName, repositoryStateId), failure); + listener.onFailure(failure); + } + } + }); + } + }); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index cf5dfe80cef..063f051b136 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; @@ -115,15 +116,13 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction - buildResponse(request, snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())), - nodeSnapshotStatuses))); + transportNodesSnapshotsStatus.execute( + new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(Strings.EMPTY_ARRAY)) + .snapshots(snapshots).timeout(request.masterNodeTimeout()), + ActionListener.wrap( + nodeSnapshotStatuses -> threadPool.executor(ThreadPool.Names.GENERIC).execute( + ActionRunnable.wrap(listener, l -> l.onResponse(buildResponse(request, snapshotsService.currentSnapshots( + request.repository(), Arrays.asList(request.snapshots())), nodeSnapshotStatuses)))), listener::onFailure)); } else { // We don't have any in-progress shards, just return current stats listener.onResponse(buildResponse(request, currentSnapshots, null)); diff --git a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index 42aaed10d61..2321c6b5f7d 100644 --- a/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ 
b/server/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -49,6 +49,9 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ -453,6 +456,21 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ GetRepositoriesRequestBuilder prepareGetRepositories(String... name); + /** + * Cleans up repository. + */ + CleanupRepositoryRequestBuilder prepareCleanupRepository(String repository); + + /** + * Cleans up repository. + */ + ActionFuture cleanupRepository(CleanupRepositoryRequest repository); + + /** + * Cleans up repository. + */ + void cleanupRepository(CleanupRepositoryRequest repository, ActionListener listener); + /** * Verifies a repository. */ diff --git a/server/src/main/java/org/elasticsearch/client/Requests.java b/server/src/main/java/org/elasticsearch/client/Requests.java index dfb011a5a12..d9b4794dc46 100644 --- a/server/src/main/java/org/elasticsearch/client/Requests.java +++ b/server/src/main/java/org/elasticsearch/client/Requests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksReque import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; @@ -471,6 +472,16 @@ public class Requests { return new DeleteRepositoryRequest(name); } + /** + * Cleanup repository + * + * @param name repository name + * @return cleanup repository request + */ + public static CleanupRepositoryRequest cleanupRepositoryRequest(String name) { + return new CleanupRepositoryRequest(name); + } + /** * Verifies snapshot repository * diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 45fd2db340b..82e3ace2ee7 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -64,6 +64,10 @@ import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageAction; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequest; import org.elasticsearch.action.admin.cluster.node.usage.NodesUsageRequestBuilder; import 
org.elasticsearch.action.admin.cluster.node.usage.NodesUsageResponse; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryAction; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequestBuilder; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequestBuilder; @@ -1019,6 +1023,21 @@ public abstract class AbstractClient implements Client { return new GetRepositoriesRequestBuilder(this, GetRepositoriesAction.INSTANCE, name); } + @Override + public CleanupRepositoryRequestBuilder prepareCleanupRepository(String repository) { + return new CleanupRepositoryRequestBuilder(this, CleanupRepositoryAction.INSTANCE, repository); + } + + @Override + public ActionFuture cleanupRepository(CleanupRepositoryRequest request) { + return execute(CleanupRepositoryAction.INSTANCE, request); + } + + @Override + public void cleanupRepository(CleanupRepositoryRequest request, ActionListener listener) { + execute(CleanupRepositoryAction.INSTANCE, request, listener); + } + @Override public ActionFuture restoreSnapshot(RestoreSnapshotRequest request) { return execute(RestoreSnapshotAction.INSTANCE, request); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index cae1215ae79..99428aaad87 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -121,6 +121,8 @@ public class ClusterModule extends AbstractModule { registerClusterCustom(entries, RestoreInProgress.TYPE, RestoreInProgress::new, RestoreInProgress::readDiffFrom); registerClusterCustom(entries, SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress::new, SnapshotDeletionsInProgress::readDiffFrom); + registerClusterCustom(entries, RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress::new, + RepositoryCleanupInProgress::readDiffFrom); // Metadata registerMetaDataCustom(entries, RepositoriesMetaData.TYPE, RepositoriesMetaData::new, RepositoriesMetaData::readDiffFrom); registerMetaDataCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom); diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java new file mode 100644 index 00000000000..a8cb897f0d3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.cluster;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+public final class RepositoryCleanupInProgress extends AbstractNamedDiffable<ClusterState.Custom> implements ClusterState.Custom {
+
+    public static final String TYPE = "repository_cleanup";
+
+    private final List<Entry> entries;
+
+    public RepositoryCleanupInProgress(Entry... entries) {
+        this.entries = Arrays.asList(entries);
+    }
+
+    RepositoryCleanupInProgress(StreamInput in) throws IOException {
+        this.entries = in.readList(Entry::new);
+    }
+
+    public static NamedDiff<ClusterState.Custom> readDiffFrom(StreamInput in) throws IOException {
+        return readDiffFrom(ClusterState.Custom.class, TYPE, in);
+    }
+
+    public static Entry startedEntry(String repository, long repositoryStateId) {
+        return new Entry(repository, repositoryStateId);
+    }
+
+    public boolean cleanupInProgress() {
+        // TODO: Should we allow parallelism across repositories here maybe?
+        return entries.isEmpty() == false;
+    }
+
+    @Override
+    public String getWriteableName() {
+        return TYPE;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeList(entries);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startArray(TYPE);
+        for (Entry entry : entries) {
+            builder.startObject();
+            {
+                builder.field("repository", entry.repository);
+            }
+            builder.endObject();
+        }
+        builder.endArray();
+        return builder;
+    }
+
+    @Override
+    public String toString() {
+        return Strings.toString(this);
+    }
+
+    @Override
+    public Version getMinimalSupportedVersion() {
+        return Version.V_7_4_0;
+    }
+
+    public static final class Entry implements Writeable {
+
+        private final String repository;
+
+        private final long repositoryStateId;
+
+        private Entry(StreamInput in) throws IOException {
+            repository = in.readString();
+            repositoryStateId = in.readLong();
+        }
+
+        private Entry(String repository, long repositoryStateId) {
+            this.repository = repository;
+            this.repositoryStateId = repositoryStateId;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeString(repository);
+            out.writeLong(repositoryStateId);
+        }
+
+        @Override
+        public String toString() {
+            return "{" + repository + '}' + '{' + repositoryStateId + '}';
+        }
+    }
+} diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index 94c6ea43d38..83de4aba8e6 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -102,9 +102,11 @@ public interface BlobContainer {
     /**
      * Deletes this container and all its contents from the repository.
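+     * Implementations that remove blobs one at a time can tally what was removed via {@link DeleteResult#add};
+     * an illustrative sketch only, not part of this interface:
+     * <pre>
+     * DeleteResult result = DeleteResult.ZERO;
+     * for (BlobMetaData blob : listBlobs().values()) {
+     *     deleteBlob(blob.name());
+     *     result = result.add(1, blob.length());
+     * }
+     * </pre>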
+ * + * @return delete result * @throws IOException on failure */ - void delete() throws IOException; + DeleteResult delete() throws IOException; /** * Deletes the blobs with given names. Unlike {@link #deleteBlob(String)} this method will not throw an exception diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java b/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java new file mode 100644 index 00000000000..9f74e31ad7d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/blobstore/DeleteResult.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.blobstore; + +/** + * The result of deleting multiple blobs from a {@link BlobStore}. + */ +public final class DeleteResult { + + public static final DeleteResult ZERO = new DeleteResult(0, 0); + + private final long blobsDeleted; + private final long bytesDeleted; + + public DeleteResult(long blobsDeleted, long bytesDeleted) { + this.blobsDeleted = blobsDeleted; + this.bytesDeleted = bytesDeleted; + } + + public long blobsDeleted() { + return blobsDeleted; + } + + public long bytesDeleted() { + return bytesDeleted; + } + + public DeleteResult add(DeleteResult other) { + return new DeleteResult(blobsDeleted + other.blobsDeleted(), bytesDeleted + other.bytesDeleted()); + } + + public DeleteResult add(long blobs, long bytes) { + return new DeleteResult(blobsDeleted + blobs, bytesDeleted + bytes); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index 6723a70a9ab..9dbb46913c2 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.core.internal.io.IOUtils; @@ -45,6 +46,7 @@ import java.nio.file.StandardOpenOption; import java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; import static java.util.Collections.unmodifiableMap; @@ -123,8 +125,26 @@ public class FsBlobContainer extends AbstractBlobContainer { } @Override - public void delete() throws IOException { - IOUtils.rm(path); + public DeleteResult delete() throws 
IOException { + final AtomicLong filesDeleted = new AtomicLong(0L); + final AtomicLong bytesDeleted = new AtomicLong(0L); + Files.walkFileTree(path, new SimpleFileVisitor() { + @Override + public FileVisitResult postVisitDirectory(Path dir, IOException impossible) throws IOException { + assert impossible == null; + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + Files.delete(file); + filesDeleted.incrementAndGet(); + bytesDeleted.addAndGet(attrs.size()); + return FileVisitResult.CONTINUE; + } + }); + return new DeleteResult(filesDeleted.get(), bytesDeleted.get()); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 229ed7ef501..ad59044e439 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -447,6 +447,7 @@ public final class ClusterSettings extends AbstractScopedSettings { Client.CLIENT_TYPE_SETTING_S, ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING, EsExecutors.PROCESSORS_SETTING, + EsExecutors.NODE_PROCESSORS_SETTING, ThreadContext.DEFAULT_HEADERS_SETTING, Loggers.LOG_DEFAULT_LEVEL_SETTING, Loggers.LOG_LEVEL_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java index 561a820d490..681e1ac72cf 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/EsExecutors.java @@ -44,6 +44,7 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; import java.util.stream.Collectors; public class EsExecutors { @@ -56,19 +57,33 @@ public class EsExecutors { public static final Setting PROCESSORS_SETTING = new Setting<>( "processors", s -> Integer.toString(Runtime.getRuntime().availableProcessors()), - s -> { - final int value = Setting.parseInt(s, 1, "processors"); + processorsParser("processors"), + Property.Deprecated, + Property.NodeScope); + + /** + * Setting to manually set the number of available processors. This setting is used to adjust thread pool sizes per node. + */ + public static final Setting NODE_PROCESSORS_SETTING = new Setting<>( + "node.processors", + PROCESSORS_SETTING, + processorsParser("node.processors"), + Property.NodeScope); + + private static Function processorsParser(final String name) { + return s -> { + final int value = Setting.parseInt(s, 1, name); final int availableProcessors = Runtime.getRuntime().availableProcessors(); if (value > availableProcessors) { deprecationLogger.deprecatedAndMaybeLog( "processors", - "setting processors to value [{}] which is more than available processors [{}] is deprecated", + "setting [" + name + "] to value [{}] which is more than available processors [{}] is deprecated", value, availableProcessors); } return value; - }, - Property.NodeScope); + }; + } /** * Returns the number of available processors. 
Defaults to @@ -79,7 +94,7 @@ public class EsExecutors { * @return the number of available processors */ public static int numberOfProcessors(final Settings settings) { - return PROCESSORS_SETTING.get(settings); + return NODE_PROCESSORS_SETTING.get(settings); } public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory, diff --git a/server/src/main/java/org/elasticsearch/http/HttpInfo.java b/server/src/main/java/org/elasticsearch/http/HttpInfo.java index cf9e4672d62..f3457495294 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpInfo.java +++ b/server/src/main/java/org/elasticsearch/http/HttpInfo.java @@ -45,7 +45,7 @@ public class HttpInfo implements Writeable, ToXContentFragment { private final BoundTransportAddress address; private final long maxContentLength; - private final boolean cnameInPublishHost; + private final boolean cnameInPublishHostProperty; public HttpInfo(StreamInput in) throws IOException { this(new BoundTransportAddress(in), in.readLong(), CNAME_IN_PUBLISH_HOST); @@ -55,10 +55,10 @@ public class HttpInfo implements Writeable, ToXContentFragment { this(address, maxContentLength, CNAME_IN_PUBLISH_HOST); } - HttpInfo(BoundTransportAddress address, long maxContentLength, boolean cnameInPublishHost) { + HttpInfo(BoundTransportAddress address, long maxContentLength, boolean cnameInPublishHostProperty) { this.address = address; this.maxContentLength = maxContentLength; - this.cnameInPublishHost = cnameInPublishHost; + this.cnameInPublishHostProperty = cnameInPublishHostProperty; } @Override @@ -83,13 +83,11 @@ public class HttpInfo implements Writeable, ToXContentFragment { String publishAddressString = publishAddress.toString(); String hostString = publishAddress.address().getHostString(); if (InetAddresses.isInetAddress(hostString) == false) { - if (cnameInPublishHost) { - publishAddressString = hostString + '/' + publishAddress.toString(); - } else { + publishAddressString = hostString + '/' + publishAddress.toString(); + if (cnameInPublishHostProperty) { deprecationLogger.deprecated( - "[http.publish_host] was printed as [ip:port] instead of [hostname/ip:port]. " - + "This format is deprecated and will change to [hostname/ip:port] in a future version. " - + "Use -Des.http.cname_in_publish_address=true to enforce non-deprecated formatting." + "es.http.cname_in_publish_address system property is deprecated and no longer affects http.publish_address " + + "formatting. Remove this property to get rid of this deprecation warning." ); } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index ca8a24ea93d..a8e629e2aff 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -195,24 +195,6 @@ public final class IndexSettings { new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), Property.Dynamic, Property.IndexScope); - /** - * Controls how long translog files that are no longer needed for persistence reasons - * will be kept around before being deleted. A longer retention policy is useful to increase - * the chance of ops based recoveries. 
- **/ - public static final Setting INDEX_TRANSLOG_RETENTION_AGE_SETTING = - Setting.timeSetting("index.translog.retention.age", TimeValue.timeValueHours(12), TimeValue.timeValueMillis(-1), - Property.Dynamic, Property.IndexScope); - - /** - * Controls how many translog files that are no longer needed for persistence reasons - * will be kept around before being deleted. Keeping more files is useful to increase - * the chance of ops based recoveries. - **/ - public static final Setting INDEX_TRANSLOG_RETENTION_SIZE_SETTING = - Setting.byteSizeSetting("index.translog.retention.size", new ByteSizeValue(512, ByteSizeUnit.MB), Property.Dynamic, - Property.IndexScope); - /** * The maximum size of a translog generation. This is independent of the maximum size of * translog operations that have not been flushed. @@ -258,6 +240,27 @@ public final class IndexSettings { Setting.longSetting("index.soft_deletes.retention.operations", 0, 0, Property.IndexScope, Property.Dynamic); + /** + * Controls how long translog files that are no longer needed for persistence reasons + * will be kept around before being deleted. Keeping more files is useful to increase + * the chance of ops based recoveries for indices with soft-deletes disabled. + * This setting will be ignored if soft-deletes is enabled. + **/ + public static final Setting INDEX_TRANSLOG_RETENTION_AGE_SETTING = + Setting.timeSetting("index.translog.retention.age", + settings -> INDEX_SOFT_DELETES_SETTING.get(settings) ? TimeValue.MINUS_ONE : TimeValue.timeValueHours(12), TimeValue.MINUS_ONE, + Property.Dynamic, Property.IndexScope); + + /** + * Controls how many translog files that are no longer needed for persistence reasons + * will be kept around before being deleted. Keeping more files is useful to increase + * the chance of ops based recoveries for indices with soft-deletes disabled. + * This setting will be ignored if soft-deletes is enabled. + **/ + public static final Setting INDEX_TRANSLOG_RETENTION_SIZE_SETTING = + Setting.byteSizeSetting("index.translog.retention.size", settings -> INDEX_SOFT_DELETES_SETTING.get(settings) ? "-1" : "512MB", + Property.Dynamic, Property.IndexScope); + /** * Controls the maximum length of time since a retention lease is created or renewed before it is considered expired. 
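 * <p>An illustrative override (a sketch only; the setting key is an assumption here, since the setting's definition falls outside this hunk):
 * <pre>
 * // key name assumed for illustration
 * Settings.builder().put("index.soft_deletes.retention_lease.period", "12h")
 * </pre>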
*/ @@ -466,8 +469,6 @@ public final class IndexSettings { syncInterval = INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.get(settings); refreshInterval = scopedSettings.get(INDEX_REFRESH_INTERVAL_SETTING); flushThresholdSize = scopedSettings.get(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING); - translogRetentionAge = scopedSettings.get(INDEX_TRANSLOG_RETENTION_AGE_SETTING); - translogRetentionSize = scopedSettings.get(INDEX_TRANSLOG_RETENTION_SIZE_SETTING); generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); @@ -493,6 +494,8 @@ public final class IndexSettings { this.indexSortConfig = new IndexSortConfig(this); searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER); defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); + setTranslogRetentionAge(scopedSettings.get(INDEX_TRANSLOG_RETENTION_AGE_SETTING)); + setTranslogRetentionSize(scopedSettings.get(INDEX_TRANSLOG_RETENTION_SIZE_SETTING)); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, @@ -553,11 +556,21 @@ public final class IndexSettings { } private void setTranslogRetentionSize(ByteSizeValue byteSizeValue) { - this.translogRetentionSize = byteSizeValue; + if (softDeleteEnabled && byteSizeValue.getBytes() >= 0) { + // ignore the translog retention settings if soft-deletes enabled + this.translogRetentionSize = new ByteSizeValue(-1); + } else { + this.translogRetentionSize = byteSizeValue; + } } private void setTranslogRetentionAge(TimeValue age) { - this.translogRetentionAge = age; + if (softDeleteEnabled && age.millis() >= 0) { + // ignore the translog retention settings if soft-deletes enabled + this.translogRetentionAge = TimeValue.MINUS_ONE; + } else { + this.translogRetentionAge = age; + } } private void setGenerationThresholdSize(final ByteSizeValue generationThresholdSize) { @@ -734,13 +747,19 @@ public final class IndexSettings { /** * Returns the transaction log retention size which controls how much of the translog is kept around to allow for ops based recoveries */ - public ByteSizeValue getTranslogRetentionSize() { return translogRetentionSize; } + public ByteSizeValue getTranslogRetentionSize() { + assert softDeleteEnabled == false || translogRetentionSize.getBytes() == -1L : translogRetentionSize; + return translogRetentionSize; + } /** * Returns the transaction log retention age which controls the maximum age (time from creation) that translog files will be kept * around */ - public TimeValue getTranslogRetentionAge() { return translogRetentionAge; } + public TimeValue getTranslogRetentionAge() { + assert softDeleteEnabled == false || translogRetentionAge.millis() == -1L : translogRetentionSize; + return translogRetentionAge; + } /** * Returns the generation threshold size. As sequence numbers can cause multiple generations to diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index 09a87124110..90d77981a76 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -221,7 +221,7 @@ public class Analysis { * If the word list cannot be found at either key. 
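 * <p>For example, a word list file referenced via {@code <prefix>_path} (a sketch; with the five-argument overload added
 * below, lines starting with {@code #} are kept when {@code removeComments} is {@code false} and skipped otherwise):
 * <pre>
 * # this line is a comment
 * foo
 * bar
 * </pre>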
*/ public static List getWordList(Environment env, Settings settings, String settingPrefix) { - return getWordList(env, settings, settingPrefix + "_path", settingPrefix); + return getWordList(env, settings, settingPrefix + "_path", settingPrefix, true); } /** @@ -231,7 +231,8 @@ public class Analysis { * @throws IllegalArgumentException * If the word list cannot be found at either key. */ - public static List getWordList(Environment env, Settings settings, String settingPath, String settingList) { + public static List getWordList(Environment env, Settings settings, + String settingPath, String settingList, boolean removeComments) { String wordListPath = settings.get(settingPath, null); if (wordListPath == null) { @@ -246,7 +247,7 @@ public class Analysis { final Path path = env.configFile().resolve(wordListPath); try { - return loadWordList(path, "#"); + return loadWordList(path, removeComments); } catch (CharacterCodingException ex) { String message = String.format(Locale.ROOT, "Unsupported character encoding detected while reading %s: %s - files must be UTF-8 encoded", @@ -258,15 +259,15 @@ public class Analysis { } } - private static List loadWordList(Path path, String comment) throws IOException { + private static List loadWordList(Path path, boolean removeComments) throws IOException { final List result = new ArrayList<>(); try (BufferedReader br = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { String word; while ((word = br.readLine()) != null) { - if (!Strings.hasText(word)) { + if (Strings.hasText(word) == false) { continue; } - if (!word.startsWith(comment)) { + if (removeComments == false || word.startsWith("#") == false) { result.add(word.trim()); } } diff --git a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index 56571679906..eebb5233dbe 100644 --- a/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/server/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -44,6 +44,8 @@ import static java.util.Collections.unmodifiableMap; */ public class BlobStoreIndexShardSnapshots implements Iterable, ToXContentFragment { + public static final BlobStoreIndexShardSnapshots EMPTY = new BlobStoreIndexShardSnapshots(Collections.emptyList()); + private final List shardSnapshots; private final Map files; private final Map> physicalFiles; diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 015c21134c6..02117e8a8ca 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -1649,6 +1649,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC * @throws IOException if an I/O exception occurred during any file operations */ public void rollGeneration() throws IOException { + // make sure we move most of the data to disk outside of the writeLock + // in order to reduce the time the lock is held since it's blocking all threads + sync(); try (Releasable ignored = writeLock.acquire()) { try { final TranslogReader reader = current.closeIntoReader(); diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java 
b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java index 7cf165a5b11..55a24d30991 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java @@ -177,11 +177,19 @@ public class TruncateTranslogAction { final TranslogConfig translogConfig = new TranslogConfig(shardPath.getShardId(), translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); long primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardPath.getShardId().id()); - final TranslogDeletionPolicy translogDeletionPolicy = - new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(), - indexSettings.getTranslogRetentionAge().getMillis()); + // We open translog to check for corruption, do not clean anything. + final TranslogDeletionPolicy retainAllTranslogPolicy = new TranslogDeletionPolicy(Long.MAX_VALUE, Long.MAX_VALUE) { + @Override + long minTranslogGenRequired(List readers, TranslogWriter writer) { + long minGen = writer.generation; + for (TranslogReader reader : readers) { + minGen = Math.min(reader.generation, minGen); + } + return minGen; + } + }; try (Translog translog = new Translog(translogConfig, translogUUID, - translogDeletionPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm, seqNo -> {}); + retainAllTranslogPolicy, () -> translogGlobalCheckpoint, () -> primaryTerm, seqNo -> {}); Translog.Snapshot snapshot = translog.newSnapshot()) { //noinspection StatementWithEmptyBody we are just checking that we can iterate through the whole snapshot while (snapshot.next() != null) { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index a4be25ae4fb..ef47b153f53 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -112,6 +112,11 @@ public class PeerRecoverySourceService implements IndexEventListener { } } + // exposed for testing + final int numberOfOngoingRecoveries() { + return ongoingRecoveries.ongoingRecoveries.size(); + } + final class OngoingRecoveries { private final Map ongoingRecoveries = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 5405929bfb6..65bcc1942ef 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -33,7 +33,9 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.StepListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.ThreadedActionListener; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -232,8 +234,7 @@ public class RecoverySourceHandler { try { final int estimateNumOps = shard.estimateNumberOfHistoryOperations("peer-recovery", startingSeqNo); - shard.store().incRef(); - final Releasable releaseStore = 
Releasables.releaseOnce(shard.store()::decRef); + final Releasable releaseStore = acquireStore(shard.store()); resources.add(releaseStore); sendFileStep.whenComplete(r -> IOUtils.close(safeCommitRef, releaseStore), e -> { try { @@ -396,6 +397,25 @@ public class RecoverySourceHandler { }); } + /** + * Increases the store reference and returns a {@link Releasable} that will decrease the store reference using the generic thread pool. + * We must never release the store using an interruptible thread as we can risk invalidating the node lock. + */ + private Releasable acquireStore(Store store) { + store.incRef(); + return Releasables.releaseOnce(() -> { + final PlainActionFuture future = new PlainActionFuture<>(); + threadPool.generic().execute(new ActionRunnable(future) { + @Override + protected void doRun() { + store.decRef(); + listener.onResponse(null); + } + }); + FutureUtils.get(future); + }); + } + static final class SendFileResult { final List phase1FileNames; final List phase1FileSizes; diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 99ebb5d3a2e..26197e61264 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -496,8 +496,9 @@ public class Node implements Closeable { RepositoriesService repositoryService = repositoriesModule.getRepositoryService(); SnapshotsService snapshotsService = new SnapshotsService(settings, clusterService, clusterModule.getIndexNameExpressionResolver(), repositoryService, threadPool); - SnapshotShardsService snapshotShardsService = new SnapshotShardsService(settings, clusterService, snapshotsService, threadPool, - transportService, indicesService, actionModule.getActionFilters(), clusterModule.getIndexNameExpressionResolver()); + SnapshotShardsService snapshotShardsService = new SnapshotShardsService(settings, clusterService, repositoryService, + threadPool, transportService, indicesService, actionModule.getActionFilters(), + clusterModule.getIndexNameExpressionResolver()); TransportNodesSnapshotsStatus nodesSnapshotsStatus = new TransportNodesSnapshotsStatus(threadPool, clusterService, transportService, snapshotShardsService, actionModule.getActionFilters()); RestoreService restoreService = new RestoreService(clusterService, repositoryService, clusterModule.getAllocationService(), diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginSecurity.java b/server/src/main/java/org/elasticsearch/plugins/PluginSecurity.java index d2246259ab7..c845ff4d3a5 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginSecurity.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginSecurity.java @@ -55,15 +55,15 @@ class PluginSecurity { // sort permissions in a reasonable order Collections.sort(requested); - terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); - terminal.println(Verbosity.NORMAL, "@ WARNING: plugin requires additional permissions @"); - terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); + terminal.errorPrintln(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); + terminal.errorPrintln(Verbosity.NORMAL, "@ WARNING: plugin requires additional permissions @"); + terminal.errorPrintln(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"); // print all permissions: for (String permission : requested) { - 
terminal.println(Verbosity.NORMAL, "* " + permission); + terminal.errorPrintln(Verbosity.NORMAL, "* " + permission); } - terminal.println(Verbosity.NORMAL, "See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html"); - terminal.println(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks."); + terminal.errorPrintln(Verbosity.NORMAL, "See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html"); + terminal.errorPrintln(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks."); prompt(terminal, batch); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java new file mode 100644 index 00000000000..bec61e02ee8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryCleanupResult.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.repositories; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.blobstore.DeleteResult; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +public final class RepositoryCleanupResult implements Writeable, ToXContentObject { + + public static final ObjectParser PARSER = + new ObjectParser<>(RepositoryCleanupResult.class.getName(), true, RepositoryCleanupResult::new); + + private static final String DELETED_BLOBS = "deleted_blobs"; + + private static final String DELETED_BYTES = "deleted_bytes"; + + static { + PARSER.declareLong((result, bytes) -> result.bytes = bytes, new ParseField(DELETED_BYTES)); + PARSER.declareLong((result, blobs) -> result.blobs = blobs, new ParseField(DELETED_BLOBS)); + } + + private long bytes; + + private long blobs; + + private RepositoryCleanupResult() { + this(DeleteResult.ZERO); + } + + public RepositoryCleanupResult(DeleteResult result) { + this.blobs = result.blobsDeleted(); + this.bytes = result.bytesDeleted(); + } + + public RepositoryCleanupResult(StreamInput in) throws IOException { + bytes = in.readLong(); + blobs = in.readLong(); + } + + public long bytes() { + return bytes; + } + + public long blobs() { + return blobs; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(bytes); + out.writeLong(blobs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject().field(DELETED_BYTES, bytes).field(DELETED_BLOBS, blobs).endObject(); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index b585e8feb77..3c8d5d5c0af 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -80,6 +81,7 @@ import org.elasticsearch.index.store.StoreFileMetaData; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryCleanupResult; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryVerificationException; @@ -428,7 +430,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); // 
Cache the indices that were found before writing out the new index-N blob so that a stuck master will never // delete an index that was created by another master node after writing this index-N blob. - foundIndices = blobStore().blobContainer(basePath().add("indices")).children(); + + foundIndices = blobStore().blobContainer(indicesPath()).children(); writeIndexGen(updatedRepositoryData, repositoryStateId); } catch (Exception ex) { listener.onFailure(new RepositoryException(metadata.name(), "failed to delete snapshot [" + snapshotId + "]", ex)); @@ -451,18 +454,61 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp .orElse(Collections.emptyList()), snapshotId, ActionListener.map(listener, v -> { - cleanupStaleIndices(foundIndices, survivingIndices); - cleanupStaleRootFiles(Sets.difference(rootBlobs, new HashSet<>(snapMetaFilesToDelete)), updatedRepositoryData); + cleanupStaleIndices(foundIndices, survivingIndices.values().stream().map(IndexId::getId).collect(Collectors.toSet())); + cleanupStaleRootFiles( + staleRootBlobs(updatedRepositoryData, Sets.difference(rootBlobs, new HashSet<>(snapMetaFilesToDelete)))); return null; }) ); } } - private void cleanupStaleRootFiles(Set rootBlobNames, RepositoryData repositoryData) { + /** + * Runs cleanup actions on the repository. Increments the repository state id by one before executing any modifications on the + * repository. + * TODO: Add shard level cleanups + *
<ul>
+ *     <li>Deleting stale indices {@link #cleanupStaleIndices}</li>
+ *     <li>Deleting unreferenced root level blobs {@link #cleanupStaleRootFiles}</li>
+ * </ul>
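+ * <p>A sketch of how these two steps combine into the result handed to the listener ({@code bytesFreed} is an illustrative
+ * name for the summed lengths of the deleted root blobs, not a member of this class):
+ * <pre>
+ * DeleteResult deleted = cleanupStaleIndices(foundIndices, survivingIndexIds);
+ * deleted = deleted.add(staleRootBlobs.size(), bytesFreed);
+ * return new RepositoryCleanupResult(deleted);
+ * </pre>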
+ * @param repositoryStateId Current repository state id + * @param listener Listener to complete when done + */ + public void cleanup(long repositoryStateId, ActionListener listener) { + ActionListener.completeWith(listener, () -> { + if (isReadOnly()) { + throw new RepositoryException(metadata.name(), "cannot run cleanup on readonly repository"); + } + final RepositoryData repositoryData = getRepositoryData(); + if (repositoryData.getGenId() != repositoryStateId) { + // Check that we are working on the expected repository version before gathering the data to clean up + throw new RepositoryException(metadata.name(), "concurrent modification of the repository before cleanup started, " + + "expected current generation [" + repositoryStateId + "], actual current generation [" + + repositoryData.getGenId() + "]"); + } + Map rootBlobs = blobContainer().listBlobs(); + final Map foundIndices = blobStore().blobContainer(indicesPath()).children(); + final Set survivingIndexIds = + repositoryData.getIndices().values().stream().map(IndexId::getId).collect(Collectors.toSet()); + final List staleRootBlobs = staleRootBlobs(repositoryData, rootBlobs.keySet()); + if (survivingIndexIds.equals(foundIndices.keySet()) && staleRootBlobs.isEmpty()) { + // Nothing to clean up, we return + return new RepositoryCleanupResult(DeleteResult.ZERO); + } + // write new index-N blob to ensure concurrent operations will fail + writeIndexGen(repositoryData, repositoryStateId); + final DeleteResult deleteIndicesResult = cleanupStaleIndices(foundIndices, survivingIndexIds); + List cleaned = cleanupStaleRootFiles(staleRootBlobs); + return new RepositoryCleanupResult( + deleteIndicesResult.add(cleaned.size(), cleaned.stream().mapToLong(name -> rootBlobs.get(name).length()).sum())); + }); + } + + // Finds all blobs directly under the repository root path that are not referenced by the current RepositoryData + private List staleRootBlobs(RepositoryData repositoryData, Set rootBlobNames) { final Set allSnapshotIds = repositoryData.getSnapshotIds().stream().map(SnapshotId::getUUID).collect(Collectors.toSet()); - final List blobsToDelete = rootBlobNames.stream().filter( + return rootBlobNames.stream().filter( blob -> { if (FsBlobContainer.isTempBlobName(blob)) { return true; @@ -483,12 +529,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp return false; } ).collect(Collectors.toList()); + } + + private List cleanupStaleRootFiles(List blobsToDelete) { if (blobsToDelete.isEmpty()) { - return; + return blobsToDelete; } try { logger.info("[{}] Found stale root level blobs {}. 
Cleaning them up", metadata.name(), blobsToDelete); blobContainer().deleteBlobsIgnoringIfNotExists(blobsToDelete); + return blobsToDelete; } catch (IOException e) { logger.warn(() -> new ParameterizedMessage( "[{}] The following blobs are no longer part of any snapshot [{}] but failed to remove them", @@ -500,18 +550,18 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp assert false : e; logger.warn(new ParameterizedMessage("[{}] Exception during cleanup of root level blobs", metadata.name()), e); } + return Collections.emptyList(); } - private void cleanupStaleIndices(Map foundIndices, Map survivingIndices) { + private DeleteResult cleanupStaleIndices(Map foundIndices, Set survivingIndexIds) { + DeleteResult deleteResult = DeleteResult.ZERO; try { - final Set survivingIndexIds = survivingIndices.values().stream() - .map(IndexId::getId).collect(Collectors.toSet()); for (Map.Entry indexEntry : foundIndices.entrySet()) { final String indexSnId = indexEntry.getKey(); try { if (survivingIndexIds.contains(indexSnId) == false) { logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); - indexEntry.getValue().delete(); + deleteResult = deleteResult.add(indexEntry.getValue().delete()); logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); } } catch (IOException e) { @@ -527,6 +577,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp assert false : e; logger.warn(new ParameterizedMessage("[{}] Exception during cleanup of stale indices", metadata.name()), e); } + return deleteResult; } private void deleteIndices(RepositoryData repositoryData, List indices, SnapshotId snapshotId, @@ -869,7 +920,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp final BlobContainer shardContainer = shardContainer(indexId, shardId); final Map blobs; try { - blobs = shardContainer.listBlobs(); + blobs = shardContainer.listBlobsByPrefix(INDEX_FILE_PREFIX); } catch (IOException e) { throw new IndexShardSnapshotFailedException(shardId, "failed to list blobs", e); } @@ -914,7 +965,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp List filesInfo = snapshots.findPhysicalIndexFiles(fileName); if (filesInfo != null) { for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { - if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { + if (fileInfo.isSame(md)) { // a commit point file with the same name, size and checksum was already copied to repository // we will reuse it for this snapshot existingFileInfo = fileInfo; @@ -1185,23 +1236,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } else if (blobKeys.isEmpty() == false) { logger.warn("Could not find a readable index-N file in a non-empty shard snapshot directory [{}]", shardContainer.path()); } - - // We couldn't load the index file - falling back to loading individual snapshots - List snapshots = new ArrayList<>(); - for (String name : blobKeys) { - try { - BlobStoreIndexShardSnapshot snapshot = null; - if (name.startsWith(SNAPSHOT_PREFIX)) { - snapshot = indexShardSnapshotFormat.readBlob(shardContainer, name); - } - if (snapshot != null) { - snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles())); - } - } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("Failed to read blob [{}]", name), e); - } - } - return new Tuple<>(new BlobStoreIndexShardSnapshots(snapshots), latest); + return 
new Tuple<>(BlobStoreIndexShardSnapshots.EMPTY, latest); } /** diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java new file mode 100644 index 00000000000..3eca34ff2d3 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCleanupRepositoryAction.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.client.Requests.cleanupRepositoryRequest; +import static org.elasticsearch.rest.RestRequest.Method.POST; + +/** + * Cleans up a repository + */ +public class RestCleanupRepositoryAction extends BaseRestHandler { + + public RestCleanupRepositoryAction(RestController controller) { + controller.registerHandler(POST, "/_snapshot/{repository}/_cleanup", this); + } + + @Override + public String getName() { + return "cleanup_repository_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + CleanupRepositoryRequest cleanupRepositoryRequest = cleanupRepositoryRequest(request.param("repository")); + cleanupRepositoryRequest.timeout(request.paramAsTime("timeout", cleanupRepositoryRequest.timeout())); + cleanupRepositoryRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cleanupRepositoryRequest.masterNodeTimeout())); + return channel -> client.admin().cluster().cleanupRepository(cleanupRepositoryRequest, new RestToXContentListener<>(channel)); + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java index b46ad80e338..4fd43becf43 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java @@ -77,6 +77,7 @@ public class RestAliasAction extends AbstractCatAction { table.addCell("filter", "alias:f,fi;desc:filter"); table.addCell("routing.index", "alias:ri,routingIndex;desc:index routing"); table.addCell("routing.search", "alias:rs,routingSearch;desc:search routing"); + table.addCell("is_write_index", "alias:w,isWriteIndex;desc:write index"); table.endHeaders(); return table; } @@ 
-95,6 +96,8 @@ public class RestAliasAction extends AbstractCatAction { table.addCell(indexRouting); String searchRouting = Strings.hasLength(aliasMetaData.searchRouting()) ? aliasMetaData.searchRouting() : "-"; table.addCell(searchRouting); + String isWriteIndex = aliasMetaData.writeIndex() == null ? "-" : aliasMetaData.writeIndex().toString(); + table.addCell(isWriteIndex); table.endRow(); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/HttpChannelTaskHandler.java b/server/src/main/java/org/elasticsearch/rest/action/search/HttpChannelTaskHandler.java new file mode 100644 index 00000000000..efda0c55f28 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/rest/action/search/HttpChannelTaskHandler.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.search; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpChannel; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicReference; + +/** + * This class executes a request and associates the corresponding {@link Task} with the {@link HttpChannel} that it was originated from, + * so that the tasks associated with a certain channel get cancelled when the underlying connection gets closed. 
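+ * <p>For example (mirroring the {@code RestSearchAction} change later in this diff), a REST handler dispatches its request
+ * through this class so that closing the client connection cancels the in-flight task:
+ * <pre>
+ * HttpChannelTaskHandler.INSTANCE.execute(client, request.getHttpChannel(), searchRequest, SearchAction.INSTANCE, listener);
+ * </pre>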
+ */ +public final class HttpChannelTaskHandler { + + public static final HttpChannelTaskHandler INSTANCE = new HttpChannelTaskHandler(); + //package private for testing + final Map httpChannels = new ConcurrentHashMap<>(); + + private HttpChannelTaskHandler() { + } + + void execute(NodeClient client, HttpChannel httpChannel, ActionRequest request, + ActionType actionType, ActionListener listener) { + + CloseListener closeListener = httpChannels.computeIfAbsent(httpChannel, channel -> new CloseListener(client)); + TaskHolder taskHolder = new TaskHolder(); + Task task = client.executeLocally(actionType, request, + new ActionListener() { + @Override + public void onResponse(Response searchResponse) { + try { + closeListener.unregisterTask(taskHolder); + } finally { + listener.onResponse(searchResponse); + } + } + + @Override + public void onFailure(Exception e) { + try { + closeListener.unregisterTask(taskHolder); + } finally { + listener.onFailure(e); + } + } + }); + closeListener.registerTask(taskHolder, new TaskId(client.getLocalNodeId(), task.getId())); + closeListener.maybeRegisterChannel(httpChannel); + } + + public int getNumChannels() { + return httpChannels.size(); + } + + final class CloseListener implements ActionListener { + private final Client client; + private final AtomicReference channel = new AtomicReference<>(); + private final Set taskIds = new HashSet<>(); + + CloseListener(Client client) { + this.client = client; + } + + int getNumTasks() { + return taskIds.size(); + } + + void maybeRegisterChannel(HttpChannel httpChannel) { + if (channel.compareAndSet(null, httpChannel)) { + //In case the channel is already closed when we register the listener, the listener will be immediately executed which will + //remove the channel from the map straight-away. That is why we first create the CloseListener and later we associate it + //with the channel. This guarantees that the close listener is already in the map when the it gets registered to its + //corresponding channel, hence it is always found in the map when it gets invoked if the channel gets closed. + httpChannel.addCloseListener(this); + } + } + + synchronized void registerTask(TaskHolder taskHolder, TaskId taskId) { + taskHolder.taskId = taskId; + if (taskHolder.completed == false) { + this.taskIds.add(taskId); + } + } + + synchronized void unregisterTask(TaskHolder taskHolder) { + if (taskHolder.taskId != null) { + this.taskIds.remove(taskHolder.taskId); + } + taskHolder.completed = true; + } + + @Override + public synchronized void onResponse(Void aVoid) { + //When the channel gets closed it won't be reused: we can remove it from the map and forget about it. + CloseListener closeListener = httpChannels.remove(channel.get()); + assert closeListener != null : "channel not found in the map of tracked channels"; + for (TaskId taskId : taskIds) { + ThreadContext threadContext = client.threadPool().getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we stash any context here since this is an internal execution and should not leak any existing context information + threadContext.markAsSystemContext(); + ContextPreservingActionListener contextPreservingListener = new ContextPreservingActionListener<>( + threadContext.newRestorableContext(false), ActionListener.wrap(r -> {}, e -> {})); + CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); + cancelTasksRequest.setTaskId(taskId); + //We don't wait for cancel tasks to come back. Task cancellation is just best effort. 
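+ //The wrapped listener above (ActionListener.wrap(r -> {}, e -> {})) deliberately drops both response and failure,
+ //so nothing surfaces if the task completed before the cancel request arrived.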
+ client.admin().cluster().cancelTasks(cancelTasksRequest, contextPreservingListener); + } + } + } + + @Override + public void onFailure(Exception e) { + onResponse(null); + } + } + + private static class TaskHolder { + private TaskId taskId; + private boolean completed = false; + } +} diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 4e935211dba..20dbbd4b55c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -20,7 +20,9 @@ package org.elasticsearch.rest.action.search; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Booleans; @@ -107,7 +109,10 @@ public class RestSearchAction extends BaseRestHandler { request.withContentOrSourceParamParserOrNull(parser -> parseSearchRequest(searchRequest, request, parser, setSize)); - return channel -> client.search(searchRequest, new RestStatusToXContentListener<>(channel)); + return channel -> { + RestStatusToXContentListener listener = new RestStatusToXContentListener<>(channel); + HttpChannelTaskHandler.INSTANCE.execute(client, request.getHttpChannel(), searchRequest, SearchAction.INSTANCE, listener); + }; } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 851233019ca..f313aff03b9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -65,6 +65,7 @@ import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus.Stage; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportException; @@ -99,7 +100,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final IndicesService indicesService; - private final SnapshotsService snapshotsService; + private final RepositoriesService repositoriesService; private final TransportService transportService; @@ -114,11 +115,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor(); private final UpdateSnapshotStatusAction updateSnapshotStatusHandler; - public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService, + public SnapshotShardsService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, ThreadPool threadPool, TransportService transportService, IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { this.indicesService = indicesService; - this.snapshotsService = snapshotsService; + this.repositoriesService = 
repositoriesService; this.transportService = transportService; this.clusterService = clusterService; this.threadPool = threadPool; @@ -361,7 +362,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements throw new IndexShardSnapshotFailedException(shardId, "shard didn't fully recover yet"); } - final Repository repository = snapshotsService.getRepositoriesService().repository(snapshot.getRepository()); + final Repository repository = repositoriesService.repository(snapshot.getRepository()); try { // we flush first to make sure we get the latest writes snapshotted try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index f819c550edf..e9b7915cf98 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.RepositoryCleanupInProgress; import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; @@ -264,6 +265,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, "cannot snapshot while a snapshot deletion is in-progress"); } + final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); + if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.cleanupInProgress() == false) { + throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, + "cannot snapshot while a repository cleanup is in-progress"); + } SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); if (snapshots == null || snapshots.entries().isEmpty()) { // Store newSnapshot here to be processed in clusterStateProcessed @@ -1133,6 +1139,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete - another snapshot is currently being deleted"); } + final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); + if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.cleanupInProgress() == false) { + throw new ConcurrentSnapshotExecutionException(snapshot.getRepository(), snapshot.getSnapshotId().getName(), + "cannot delete snapshot while a repository cleanup is in-progress"); + } RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE); if (restoreInProgress != null) { // don't allow snapshot deletions while a restore is taking place, @@ -1457,8 +1468,4 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus protected void doClose() { clusterService.removeApplier(this); } - - public RepositoriesService getRepositoriesService() { - return repositoriesService; - } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index ca3888aa7f8..e1ac076d2ad 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -203,7 +203,7 @@ public class ClusterStatsIT extends ESIntegTestCase { public void testAllocatedProcessors() throws Exception { // start one node with 7 processors. - internalCluster().startNode(Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 7).build()); + internalCluster().startNode(Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 7).build()); waitForNodes(1); ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); diff --git a/server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java b/server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java index 1f0a953a707..fb6925ba2eb 100644 --- a/server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.monitor.jvm.JvmInfo; import java.nio.file.Path; import java.util.Locale; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; @@ -53,15 +53,15 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { private void runTestThatVersionIsMutuallyExclusiveToOtherOptions(String... args) throws Exception { runTestVersion( ExitCodes.USAGE, - output -> assertThat( - output, + (output, error) -> assertThat( + error, allOf(containsString("ERROR:"), containsString("are unavailable given other options on the command line"))), args); } private void runTestThatVersionIsReturned(String... args) throws Exception { - runTestVersion(ExitCodes.OK, output -> { + runTestVersion(ExitCodes.OK, (output, error) -> { assertThat(output, containsString("Version: " + Build.CURRENT.getQualifiedVersion())); final String expectedBuildOutput = String.format( Locale.ROOT, @@ -75,7 +75,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { }, args); } - private void runTestVersion(int expectedStatus, Consumer outputConsumer, String... args) throws Exception { + private void runTestVersion(int expectedStatus, BiConsumer outputConsumer, String... 
args) throws Exception { runTest(expectedStatus, false, outputConsumer, (foreground, pidFile, quiet, esSettings) -> {}, args); } @@ -83,19 +83,19 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.USAGE, false, - output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")), + (output, error) -> assertThat(error, containsString("Positional arguments not allowed, found [foo]")), (foreground, pidFile, quiet, esSettings) -> {}, "foo"); runTest( ExitCodes.USAGE, false, - output -> assertThat(output, containsString("Positional arguments not allowed, found [foo, bar]")), + (output, error) -> assertThat(error, containsString("Positional arguments not allowed, found [foo, bar]")), (foreground, pidFile, quiet, esSettings) -> {}, "foo", "bar"); runTest( ExitCodes.USAGE, false, - output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")), + (output, error) -> assertThat(error, containsString("Positional arguments not allowed, found [foo]")), (foreground, pidFile, quiet, esSettings) -> {}, "-E", "foo=bar", "foo", "-E", "baz=qux"); } @@ -104,12 +104,12 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { Path tmpDir = createTempDir(); Path pidFile = tmpDir.resolve("pid"); runPidFileTest(ExitCodes.USAGE, false, - output -> assertThat(output, containsString("Option p/pidfile requires an argument")), pidFile, "-p"); - runPidFileTest(ExitCodes.OK, true, output -> {}, pidFile, "-p", pidFile.toString()); - runPidFileTest(ExitCodes.OK, true, output -> {}, pidFile, "--pidfile", tmpDir.toString() + "/pid"); + (output, error) -> assertThat(error, containsString("Option p/pidfile requires an argument")), pidFile, "-p"); + runPidFileTest(ExitCodes.OK, true, (output, error) -> {}, pidFile, "-p", pidFile.toString()); + runPidFileTest(ExitCodes.OK, true, (output, error) -> {}, pidFile, "--pidfile", tmpDir.toString() + "/pid"); } - private void runPidFileTest(final int expectedStatus, final boolean expectedInit, Consumer outputConsumer, + private void runPidFileTest(final int expectedStatus, final boolean expectedInit, BiConsumer outputConsumer, Path expectedPidFile, final String... 
args) throws Exception { runTest( @@ -130,7 +130,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.OK, true, - output -> {}, + (output, error) -> {}, (foreground, pidFile, quiet, esSettings) -> assertThat(foreground, equalTo(!expectedDaemonize)), args); } @@ -145,7 +145,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.OK, true, - output -> {}, + (output, error) -> {}, (foreground, pidFile, quiet, esSettings) -> assertThat(quiet, equalTo(expectedQuiet)), args); } @@ -154,7 +154,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.OK, true, - output -> {}, + (output, error) -> {}, (foreground, pidFile, quiet, env) -> { Settings settings = env.settings(); assertEquals("bar", settings.get("foo")); @@ -167,7 +167,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.USAGE, false, - output -> assertThat(output, containsString("setting [foo] must not be empty")), + (output, error) -> assertThat(error, containsString("setting [foo] must not be empty")), (foreground, pidFile, quiet, esSettings) -> {}, "-E", "foo="); } @@ -176,7 +176,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.USAGE, false, - output -> assertThat(output, containsString("setting [foo] already set, saw [bar] and [baz]")), + (output, error) -> assertThat(error, containsString("setting [foo] already set, saw [bar] and [baz]")), (foreground, pidFile, quiet, initialEnv) -> {}, "-E", "foo=bar", "-E", "foo=baz"); } @@ -185,7 +185,7 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { runTest( ExitCodes.USAGE, false, - output -> assertThat(output, containsString("network.host is not a recognized option")), + (output, error) -> assertThat(error, containsString("network.host is not a recognized option")), (foreground, pidFile, quiet, esSettings) -> {}, "--network.host"); } diff --git a/server/src/test/java/org/elasticsearch/cli/CommandTests.java b/server/src/test/java/org/elasticsearch/cli/CommandTests.java index 2b2437eea65..092e5dfd480 100644 --- a/server/src/test/java/org/elasticsearch/cli/CommandTests.java +++ b/server/src/test/java/org/elasticsearch/cli/CommandTests.java @@ -110,6 +110,31 @@ public class CommandTests extends ESTestCase { assertFalse(command.executed); } + public void testUnknownOptions() throws Exception { + NoopCommand command = new NoopCommand(); + MockTerminal terminal = new MockTerminal(); + String[] args = {"-Z"}; + int status = command.main(args, terminal); + String output = terminal.getOutput(); + String error = terminal.getErrorOutput(); + assertEquals(output, ExitCodes.USAGE, status); + assertTrue(error, error.contains("Does nothing")); + assertFalse(output, output.contains("Some extra help")); // extra help not printed for usage errors + assertTrue(error, error.contains("ERROR: Z is not a recognized option")); + assertFalse(command.executed); + + command = new NoopCommand(); + String[] args2 = {"--foobar"}; + status = command.main(args2, terminal); + output = terminal.getOutput(); + error = terminal.getErrorOutput(); + assertEquals(output, ExitCodes.USAGE, status); + assertTrue(error, error.contains("Does nothing")); + assertFalse(output, output.contains("Some extra help")); // extra help not printed for usage errors + assertTrue(error, error.contains("ERROR: foobar is not a recognized option")); // assert on the option actually passed in this run + assertFalse(command.executed); + } + public void
testVerbositySilentAndVerbose() throws Exception { MockTerminal terminal = new MockTerminal(); NoopCommand command = new NoopCommand(); @@ -155,8 +180,9 @@ public class CommandTests extends ESTestCase { String[] args = {}; int status = command.main(args, terminal); String output = terminal.getOutput(); + String error = terminal.getErrorOutput(); assertEquals(output, ExitCodes.DATA_ERROR, status); - assertTrue(output, output.contains("ERROR: Bad input")); + assertTrue(error, error.contains("ERROR: Bad input")); } public void testUsageError() throws Exception { @@ -165,9 +191,10 @@ public class CommandTests extends ESTestCase { String[] args = {}; int status = command.main(args, terminal); String output = terminal.getOutput(); + String error = terminal.getErrorOutput(); assertEquals(output, ExitCodes.USAGE, status); - assertTrue(output, output.contains("Throws a usage error")); - assertTrue(output, output.contains("ERROR: something was no good")); + assertTrue(error, error.contains("Throws a usage error")); + assertTrue(error, error.contains("ERROR: something was no good")); } } diff --git a/server/src/test/java/org/elasticsearch/cli/TerminalTests.java b/server/src/test/java/org/elasticsearch/cli/TerminalTests.java index 3b409c2add6..99bbe9d6184 100644 --- a/server/src/test/java/org/elasticsearch/cli/TerminalTests.java +++ b/server/src/test/java/org/elasticsearch/cli/TerminalTests.java @@ -41,6 +41,26 @@ public class TerminalTests extends ESTestCase { assertPrinted(terminal, Terminal.Verbosity.VERBOSE, "text"); } + public void testErrorVerbosity() throws Exception { + MockTerminal terminal = new MockTerminal(); + terminal.setVerbosity(Terminal.Verbosity.SILENT); + assertErrorPrinted(terminal, Terminal.Verbosity.SILENT, "text"); + assertErrorNotPrinted(terminal, Terminal.Verbosity.NORMAL, "text"); + assertErrorNotPrinted(terminal, Terminal.Verbosity.VERBOSE, "text"); + + terminal = new MockTerminal(); + assertErrorPrinted(terminal, Terminal.Verbosity.SILENT, "text"); + assertErrorPrinted(terminal, Terminal.Verbosity.NORMAL, "text"); + assertErrorNotPrinted(terminal, Terminal.Verbosity.VERBOSE, "text"); + + terminal = new MockTerminal(); + terminal.setVerbosity(Terminal.Verbosity.VERBOSE); + assertErrorPrinted(terminal, Terminal.Verbosity.SILENT, "text"); + assertErrorPrinted(terminal, Terminal.Verbosity.NORMAL, "text"); + assertErrorPrinted(terminal, Terminal.Verbosity.VERBOSE, "text"); + } + + public void testEscaping() throws Exception { MockTerminal terminal = new MockTerminal(); assertPrinted(terminal, Terminal.Verbosity.NORMAL, "This message contains percent like %20n"); @@ -87,4 +107,18 @@ public class TerminalTests extends ESTestCase { String output = logTerminal.getOutput(); assertTrue(output, output.isEmpty()); } + + private void assertErrorPrinted(MockTerminal logTerminal, Terminal.Verbosity verbosity, String text) throws Exception { + logTerminal.errorPrintln(verbosity, text); + String output = logTerminal.getErrorOutput(); + assertTrue(output, output.contains(text)); + logTerminal.reset(); + } + + private void assertErrorNotPrinted(MockTerminal logTerminal, Terminal.Verbosity verbosity, String text) throws Exception { + logTerminal.errorPrintln(verbosity, text); + String output = logTerminal.getErrorOutput(); + assertTrue(output, output.isEmpty()); + } + } diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java index 0f0350c4821..7e920a20c53 100644 --- 
a/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/EsExecutorsTests.java @@ -19,10 +19,12 @@ package org.elasticsearch.common.util.concurrent; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; +import java.util.Locale; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ThreadPoolExecutor; @@ -388,4 +390,32 @@ public class EsExecutorsTests extends ESTestCase { } } + public void testNodeProcessorsBound() { + runProcessorsBoundTest(EsExecutors.NODE_PROCESSORS_SETTING); + } + + public void testProcessorsBound() { + runProcessorsBoundTest(EsExecutors.PROCESSORS_SETTING); + } + + private void runProcessorsBoundTest(final Setting processorsSetting) { + final int available = Runtime.getRuntime().availableProcessors(); + final int processors = randomIntBetween(available + 1, Integer.MAX_VALUE); + final Settings settings = Settings.builder().put(processorsSetting.getKey(), processors).build(); + processorsSetting.get(settings); + final Setting[] deprecatedSettings; + if (processorsSetting.getProperties().contains(Setting.Property.Deprecated)) { + deprecatedSettings = new Setting[]{processorsSetting}; + } else { + deprecatedSettings = new Setting[0]; + } + final String expectedWarning = String.format( + Locale.ROOT, + "setting [%s] to value [%d] which is more than available processors [%d] is deprecated", + processorsSetting.getKey(), + processors, + available); + assertSettingDeprecationsAndWarnings(deprecatedSettings, expectedWarning); + } + } diff --git a/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java b/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java index db149bd6d0d..cd0cf7e1894 100644 --- a/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java +++ b/server/src/test/java/org/elasticsearch/http/HttpInfoTests.java @@ -40,14 +40,30 @@ public class HttpInfoTests extends ESTestCase { new BoundTransportAddress( new TransportAddress[]{new TransportAddress(localhost, port)}, new TransportAddress(localhost, port) - ), 0L, true + ), 0L, false ), "localhost/" + NetworkAddress.format(localhost) + ':' + port ); } - public void hideCnameIfDeprecatedFormat() throws Exception { + public void testDeprecatedWarningIfPropertySpecified() throws Exception { InetAddress localhost = InetAddress.getByName("localhost"); int port = 9200; + assertPublishAddress( + new HttpInfo( + new BoundTransportAddress( + new TransportAddress[]{new TransportAddress(localhost, port)}, + new TransportAddress(localhost, port) + ), 0L, true + ), "localhost/" + NetworkAddress.format(localhost) + ':' + port + ); + assertWarnings( + "es.http.cname_in_publish_address system property is deprecated and no longer affects http.publish_address " + + "formatting. 
Remove this property to get rid of this deprecation warning."); + } + + public void testCorrectDisplayPublishedIp() throws Exception { + InetAddress localhost = InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("localhost"))); + int port = 9200; assertPublishAddress( new HttpInfo( new BoundTransportAddress( @@ -58,26 +74,13 @@ public class HttpInfoTests extends ESTestCase { ); } - public void testCorrectDisplayPublishedIp() throws Exception { - InetAddress localhost = InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("localhost"))); - int port = 9200; - assertPublishAddress( - new HttpInfo( - new BoundTransportAddress( - new TransportAddress[]{new TransportAddress(localhost, port)}, - new TransportAddress(localhost, port) - ), 0L, true - ), NetworkAddress.format(localhost) + ':' + port - ); - } - public void testCorrectDisplayPublishedIpv6() throws Exception { int port = 9200; TransportAddress localhost = new TransportAddress(InetAddress.getByName(NetworkAddress.format(InetAddress.getByName("0:0:0:0:0:0:0:1"))), port); assertPublishAddress( new HttpInfo( - new BoundTransportAddress(new TransportAddress[]{localhost}, localhost), 0L, true + new BoundTransportAddress(new TransportAddress[]{localhost}, localhost), 0L, false ), localhost.toString() ); } diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index a4256c7e0cc..afa74512e31 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -396,15 +396,20 @@ public class IndexServiceTests extends ESSingleNodeTestCase { final Path translogPath = translog.getConfig().getTranslogPath(); final String translogUuid = translog.getTranslogUUID(); + int translogOps = 0; final int numDocs = scaledRandomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { client().prepareIndex().setIndex(indexName).setId(String.valueOf(i)).setSource("{\"foo\": \"bar\"}", XContentType.JSON).get(); + translogOps++; if (randomBoolean()) { client().admin().indices().prepareFlush(indexName).get(); + if (indexService.getIndexSettings().isSoftDeleteEnabled()) { + translogOps = 0; + } } } - assertThat(translog.totalOperations(), equalTo(numDocs)); - assertThat(translog.stats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertThat(translog.totalOperations(), equalTo(translogOps)); + assertThat(translog.stats().estimatedNumberOfOperations(), equalTo(translogOps)); assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(ActiveShardCount.DEFAULT)); indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(indexService.index()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java index b3e6557b187..c79c9268f24 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingsTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.translog.Translog; @@ 
-577,4 +578,64 @@ public class IndexSettingsTests extends ESTestCase { assertFalse(IndexSettings.INDEX_SOFT_DELETES_SETTING.get(settings)); } } + + public void testIgnoreTranslogRetentionSettingsIfSoftDeletesEnabled() { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, Version.CURRENT)); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); + } + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); + } + IndexMetaData metaData = newIndexMeta("index", settings.build()); + IndexSettings indexSettings = new IndexSettings(metaData, Settings.EMPTY); + assertThat(indexSettings.getTranslogRetentionAge().millis(), equalTo(-1L)); + assertThat(indexSettings.getTranslogRetentionSize().getBytes(), equalTo(-1L)); + + Settings.Builder newSettings = Settings.builder().put(settings.build()); + if (randomBoolean()) { + newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); + } + if (randomBoolean()) { + newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); + } + indexSettings.updateIndexMetaData(newIndexMeta("index", newSettings.build())); + assertThat(indexSettings.getTranslogRetentionAge().millis(), equalTo(-1L)); + assertThat(indexSettings.getTranslogRetentionSize().getBytes(), equalTo(-1L)); + } + + public void testUpdateTranslogRetentionSettingsWithSoftDeletesDisabled() { + Settings.Builder settings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT); + + TimeValue ageSetting = TimeValue.timeValueHours(12); + if (randomBoolean()) { + ageSetting = randomBoolean() ? TimeValue.MINUS_ONE : TimeValue.timeValueMillis(randomIntBetween(0, 10000)); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), ageSetting); + } + ByteSizeValue sizeSetting = new ByteSizeValue(512, ByteSizeUnit.MB); + if (randomBoolean()) { + sizeSetting = randomBoolean() ? new ByteSizeValue(-1) : new ByteSizeValue(randomIntBetween(0, 1024)); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), sizeSetting); + } + IndexMetaData metaData = newIndexMeta("index", settings.build()); + IndexSettings indexSettings = new IndexSettings(metaData, Settings.EMPTY); + assertThat(indexSettings.getTranslogRetentionAge(), equalTo(ageSetting)); + assertThat(indexSettings.getTranslogRetentionSize(), equalTo(sizeSetting)); + + Settings.Builder newSettings = Settings.builder().put(settings.build()); + if (randomBoolean()) { + ageSetting = randomBoolean() ? TimeValue.MINUS_ONE : TimeValue.timeValueMillis(randomIntBetween(0, 10000)); + newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), ageSetting); + } + if (randomBoolean()) { + sizeSetting = randomBoolean() ? 
new ByteSizeValue(-1) : new ByteSizeValue(randomIntBetween(0, 1024)); + newSettings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), sizeSetting); + } + indexSettings.updateIndexMetaData(newIndexMeta("index", newSettings.build())); + assertThat(indexSettings.getTranslogRetentionAge(), equalTo(ageSetting)); + assertThat(indexSettings.getTranslogRetentionSize(), equalTo(sizeSetting)); + } } diff --git a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java index c3c2a8176e3..490c89485d3 100644 --- a/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergeSchedulerSettingsTests.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; -import static org.elasticsearch.common.util.concurrent.EsExecutors.PROCESSORS_SETTING; +import static org.elasticsearch.common.util.concurrent.EsExecutors.NODE_PROCESSORS_SETTING; import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; import static org.elasticsearch.index.MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING; import static org.elasticsearch.index.MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING; @@ -139,7 +139,7 @@ public class MergeSchedulerSettingsTests extends ESTestCase { builder.put(MAX_MERGE_COUNT_SETTING.getKey(), maxMergeCount); } if (numProc != -1) { - builder.put(PROCESSORS_SETTING.getKey(), numProc); + builder.put(NODE_PROCESSORS_SETTING.getKey(), numProc); } return newIndexMeta("index", builder.build()); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java index bd934f683fb..623bbe0ec50 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java @@ -169,13 +169,14 @@ public class NoOpEngineTests extends EngineTestCase { tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table); tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); + boolean softDeleteEnabled = engine.config().getIndexSettings().isSoftDeleteEnabled(); final int numDocs = scaledRandomIntBetween(10, 3000); for (int i = 0; i < numDocs; i++) { engine.index(indexForDoc(createParsedDoc(Integer.toString(i), null))); + tracker.updateLocalCheckpoint(allocationId.getId(), i); if (rarely()) { engine.flush(); } - tracker.updateLocalCheckpoint(allocationId.getId(), i); } engine.flush(true, true); @@ -195,7 +196,7 @@ public class NoOpEngineTests extends EngineTestCase { } assertThat(Translog.readMinTranslogGeneration(translogPath, translogUuid), equalTo(minFileGeneration)); - assertThat(noOpEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertThat(noOpEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(softDeleteEnabled ? 
0 : numDocs)); assertThat(noOpEngine.getTranslogStats().getUncommittedOperations(), equalTo(0)); noOpEngine.trimUnreferencedTranslogFiles(); diff --git a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java index 506be95c225..c01aca80825 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/ReadOnlyEngineTests.java @@ -250,7 +250,7 @@ public class ReadOnlyEngineTests extends EngineTestCase { try (Store store = createStore()) { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); - + final boolean softDeletesEnabled = config.getIndexSettings().isSoftDeleteEnabled(); final int numDocs = frequently() ? scaledRandomIntBetween(10, 200) : 0; int uncommittedDocs = 0; @@ -259,16 +259,17 @@ public class ReadOnlyEngineTests extends EngineTestCase { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); engine.index(new Engine.Index(newUid(doc), doc, i, primaryTerm.get(), 1, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); + globalCheckpoint.set(i); if (rarely()) { engine.flush(); uncommittedDocs = 0; } else { uncommittedDocs += 1; } - globalCheckpoint.set(i); } - assertThat(engine.getTranslogStats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertThat(engine.getTranslogStats().estimatedNumberOfOperations(), + equalTo(softDeletesEnabled ? uncommittedDocs : numDocs)); assertThat(engine.getTranslogStats().getUncommittedOperations(), equalTo(uncommittedDocs)); assertThat(engine.getTranslogStats().getTranslogSizeInBytes(), greaterThan(0L)); assertThat(engine.getTranslogStats().getUncommittedSizeInBytes(), greaterThan(0L)); @@ -278,7 +279,7 @@ public class ReadOnlyEngineTests extends EngineTestCase { } try (ReadOnlyEngine readOnlyEngine = new ReadOnlyEngine(config, null, null, true, Function.identity())) { - assertThat(readOnlyEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(numDocs)); + assertThat(readOnlyEngine.getTranslogStats().estimatedNumberOfOperations(), equalTo(softDeletesEnabled ? 
0 : numDocs)); assertThat(readOnlyEngine.getTranslogStats().getUncommittedOperations(), equalTo(0)); assertThat(readOnlyEngine.getTranslogStats().getTranslogSizeInBytes(), greaterThan(0L)); assertThat(readOnlyEngine.getTranslogStats().getUncommittedSizeInBytes(), greaterThan(0L)); diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 3809d002483..2817c51d33a 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -467,7 +467,12 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase shards.startReplicas(nReplica); for (IndexShard shard : shards) { try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { - assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + // we flush at the end of peer recovery + if (shard.routingEntry().primary() || shard.indexSettings().isSoftDeleteEnabled() == false) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } else { + assertThat(snapshot.totalOperations(), equalTo(0)); + } } try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); @@ -476,11 +481,16 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase // the failure replicated directly from the replication channel. indexResp = shards.index(new IndexRequest(index.getName(), "type", "any").source("{}", XContentType.JSON)); assertThat(indexResp.getFailure().getCause(), equalTo(indexException)); - expectedTranslogOps.add(new Translog.NoOp(1, primaryTerm, indexException.toString())); + Translog.NoOp noop2 = new Translog.NoOp(1, primaryTerm, indexException.toString()); + expectedTranslogOps.add(noop2); for (IndexShard shard : shards) { try (Translog.Snapshot snapshot = getTranslog(shard).newSnapshot()) { - assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + if (shard.routingEntry().primary() || shard.indexSettings().isSoftDeleteEnabled() == false) { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); + } else { + assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(Collections.singletonList(noop2))); + } } try (Translog.Snapshot snapshot = shard.getHistoryOperations("test", 0)) { assertThat(snapshot, SnapshotMatchers.containsOperationsInAnyOrder(expectedTranslogOps)); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b3799e3fe76..c0bb63e2466 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2136,11 +2136,13 @@ public class IndexShardTests extends IndexShardTestCase { /* This test just verifies that we fill up local checkpoint up to max seen seqID on primary recovery */ public void testRecoverFromStoreWithNoOps() throws IOException { - final IndexShard shard = newStartedShard(true); + final Settings settings = Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()).build(); + final IndexShard shard = 
newStartedShard(true, settings); indexDoc(shard, "_doc", "0"); indexDoc(shard, "_doc", "1"); // start a replica shard and index the second doc - final IndexShard otherShard = newStartedShard(false); + final IndexShard otherShard = newStartedShard(false, settings); updateMappings(otherShard, shard.indexSettings().getIndexMetaData()); SourceToParse sourceToParse = new SourceToParse(shard.shardId().getIndexName(), "_doc", "1", new BytesArray("{}"), XContentType.JSON); @@ -2179,7 +2181,7 @@ public class IndexShardTests extends IndexShardTestCase { newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null)); assertTrue(newShard.recoverFromStore()); try (Translog.Snapshot snapshot = getTranslog(newShard).newSnapshot()) { - assertThat(snapshot.totalOperations(), equalTo(2)); + assertThat(snapshot.totalOperations(), equalTo(newShard.indexSettings.isSoftDeleteEnabled() ? 0 : 2)); } } closeShards(newShard, shard); @@ -3801,7 +3803,13 @@ public class IndexShardTests extends IndexShardTestCase { engineResetLatch.await(); assertThat(getShardDocUIDs(shard), equalTo(docBelowGlobalCheckpoint)); assertThat(shard.seqNoStats().getMaxSeqNo(), equalTo(globalCheckpoint)); - assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(translogStats.estimatedNumberOfOperations())); + if (shard.indexSettings.isSoftDeleteEnabled()) { + // we might have trimmed some operations if the translog retention policy is ignored (when soft-deletes enabled). + assertThat(shard.translogStats().estimatedNumberOfOperations(), + lessThanOrEqualTo(translogStats.estimatedNumberOfOperations())); + } else { + assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(translogStats.estimatedNumberOfOperations())); + } assertThat(shard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(maxSeqNoBeforeRollback)); done.set(true); thread.join(); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java index f597d2adc80..e22acfca3ae 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceCloseTests.java @@ -70,7 +70,7 @@ public class IndicesServiceCloseTests extends ESTestCase { .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent()) .put(Node.NODE_NAME_SETTING.getKey(), nodeName) .put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE.getKey(), "1000/1m") - .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created + .put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created .put("transport.type", getTestTransportType()) .put(Node.NODE_DATA_SETTING.getKey(), true) .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), random().nextLong()) diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index f169263a6cd..ae7d6f07183 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.ActiveShardCount; 
import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -1488,4 +1489,20 @@ public class IndexRecoveryIT extends ESIntegTestCase { } ensureGreen(indexName); } + + public void testCancelRecoveryWithAutoExpandReplicas() throws Exception { + internalCluster().startMasterOnlyNode(); + assertAcked(client().admin().indices().prepareCreate("test") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-all")) + .setWaitForActiveShards(ActiveShardCount.NONE)); + internalCluster().startNode(); + internalCluster().startNode(); + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + assertAcked(client().admin().indices().prepareDelete("test")); // cancel recoveries + assertBusy(() -> { + for (PeerRecoverySourceService recoveryService : internalCluster().getDataNodeInstances(PeerRecoverySourceService.class)) { + assertThat(recoveryService.numberOfOngoingRecoveries(), equalTo(0)); + } + }); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 3b338ff824f..b340d8c52be 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -82,7 +82,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { shards.startAll(); final IndexShard replica = shards.getReplicas().get(0); boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); - assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? moreDocs : docs + moreDocs)); + assertThat(getTranslog(replica).totalOperations(), equalTo(softDeletesEnabled ? 0 : docs + moreDocs)); shards.assertAllEqual(docs + moreDocs); } } @@ -298,7 +298,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { // file based recovery should be made assertThat(newReplica.recoveryState().getIndex().fileDetails(), not(empty())); boolean softDeletesEnabled = replica.indexSettings().isSoftDeleteEnabled(); - assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? nonFlushedDocs : numDocs)); + assertThat(getTranslog(newReplica).totalOperations(), equalTo(softDeletesEnabled ? 
0 : numDocs)); // history uuid was restored assertThat(newReplica.getHistoryUUID(), equalTo(historyUUID)); @@ -385,7 +385,12 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { shards.recoverReplica(newReplica); try (Translog.Snapshot snapshot = getTranslog(newReplica).newSnapshot()) { - assertThat("Sequence based recovery should keep existing translog", snapshot, SnapshotMatchers.size(initDocs + moreDocs)); + if (newReplica.indexSettings().isSoftDeleteEnabled()) { + assertThat(snapshot.totalOperations(), equalTo(0)); + } else { + assertThat("Sequence based recovery should keep existing translog", + snapshot, SnapshotMatchers.size(initDocs + moreDocs)); + } } assertThat(newReplica.recoveryState().getTranslog().recoveredOperations(), equalTo(uncommittedDocs + moreDocs)); assertThat(newReplica.recoveryState().getIndex().fileDetails(), empty()); diff --git a/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index ea447cc998b..e98fedbfcd7 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESIntegTestCase; @@ -352,11 +353,13 @@ public class OpenCloseIndexIT extends ESIntegTestCase { } } - public void testTranslogStats() { + public void testTranslogStats() throws Exception { final String indexName = "test"; createIndex(indexName, Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build()); + boolean softDeletesEnabled = IndexSettings.INDEX_SOFT_DELETES_SETTING.get( + client().admin().indices().prepareGetSettings(indexName).get().getIndexToSettings().get(indexName)); final int nbDocs = randomIntBetween(0, 50); int uncommittedOps = 0; @@ -372,17 +375,23 @@ public class OpenCloseIndexIT extends ESIntegTestCase { } } - IndicesStatsResponse stats = client().admin().indices().prepareStats(indexName).clear().setTranslog(true).get(); - assertThat(stats.getIndex(indexName), notNullValue()); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(uncommittedOps)); + final int uncommittedTranslogOps = uncommittedOps; + assertBusy(() -> { + IndicesStatsResponse stats = client().admin().indices().prepareStats(indexName).clear().setTranslog(true).get(); + assertThat(stats.getIndex(indexName), notNullValue()); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo( + softDeletesEnabled ? 
uncommittedTranslogOps : nbDocs)); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(uncommittedTranslogOps)); + }); assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(ActiveShardCount.ONE)); IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN_CLOSED; - stats = client().admin().indices().prepareStats(indexName).setIndicesOptions(indicesOptions).clear().setTranslog(true).get(); + IndicesStatsResponse stats = client().admin().indices().prepareStats(indexName) + .setIndicesOptions(indicesOptions).clear().setTranslog(true).get(); assertThat(stats.getIndex(indexName), notNullValue()); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), + equalTo(softDeletesEnabled ? 0 : nbDocs)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(0)); } } diff --git a/server/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java b/server/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java index f1dba4e58c6..615bf543127 100644 --- a/server/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java +++ b/server/src/test/java/org/elasticsearch/nodesinfo/SimpleNodesInfoIT.java @@ -114,8 +114,8 @@ public class SimpleNodesInfoIT extends ESIntegTestCase { public void testAllocatedProcessors() throws Exception { List nodesIds = internalCluster().startNodes( - Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 3).build(), - Settings.builder().put(EsExecutors.PROCESSORS_SETTING.getKey(), 6).build() + Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 3).build(), + Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 6).build() ); final String node_1 = nodesIds.get(0); diff --git a/server/src/test/java/org/elasticsearch/rest/action/search/HttpChannelTaskHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/action/search/HttpChannelTaskHandlerTests.java new file mode 100644 index 00000000000..103981abdc4 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/rest/action/search/HttpChannelTaskHandlerTests.java @@ -0,0 +1,280 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest.action.search; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.PlainListenableActionFuture; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.http.HttpChannel; +import org.elasticsearch.http.HttpResponse; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +public class HttpChannelTaskHandlerTests extends ESTestCase { + + private ThreadPool threadPool; + + @Before + public void createThreadPool() { + threadPool = new TestThreadPool(HttpChannelTaskHandlerTests.class.getName()); + } + + @After + public void stopThreadPool() { + ThreadPool.terminate(threadPool, 5, TimeUnit.SECONDS); + } + + /** + * This test verifies that no tasks are left in the map where channels and their corresponding tasks are tracked. + * Through the {@link TestClient} we simulate a scenario where the task may complete even before it has been + * associated with its corresponding channel. Either way, we need to make sure that no tasks are left in the map. 
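+ * A completed task is removed from its channel's tracked set as soon as its listener completes, while the channel entry + * itself stays in the map until the channel is closed; no channel is ever closed in this test, hence every channel + * created here is expected to remain tracked, with zero remaining tasks.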
+ */ + public void testCompletedTasks() throws Exception { + try (TestClient testClient = new TestClient(Settings.EMPTY, threadPool, false)) { + HttpChannelTaskHandler httpChannelTaskHandler = HttpChannelTaskHandler.INSTANCE; + int initialHttpChannels = httpChannelTaskHandler.getNumChannels(); + int totalSearches = 0; + List<Future<SearchResponse>> futures = new ArrayList<>(); + int numChannels = randomIntBetween(1, 30); + for (int i = 0; i < numChannels; i++) { + int numTasks = randomIntBetween(1, 30); + TestHttpChannel channel = new TestHttpChannel(); + totalSearches += numTasks; + for (int j = 0; j < numTasks; j++) { + PlainListenableActionFuture<SearchResponse> actionFuture = PlainListenableActionFuture.newListenableFuture(); + threadPool.generic().submit(() -> httpChannelTaskHandler.execute(testClient, channel, new SearchRequest(), + SearchAction.INSTANCE, actionFuture)); + futures.add(actionFuture); + } + } + for (Future<SearchResponse> future : futures) { + future.get(); + } + // no channels get closed in this test, hence we expect as many channels as we created in the map + assertEquals(initialHttpChannels + numChannels, httpChannelTaskHandler.getNumChannels()); + for (Map.Entry<HttpChannel, HttpChannelTaskHandler.CloseListener> entry : httpChannelTaskHandler.httpChannels.entrySet()) { + assertEquals(0, entry.getValue().getNumTasks()); + } + assertEquals(totalSearches, testClient.searchRequests.get()); + } + } + + /** + * This test verifies the behaviour when the channel gets closed. The channel is expected to be + * removed and all of its corresponding tasks get cancelled. + */ + public void testCancelledTasks() throws Exception { + try (TestClient testClient = new TestClient(Settings.EMPTY, threadPool, true)) { + HttpChannelTaskHandler httpChannelTaskHandler = HttpChannelTaskHandler.INSTANCE; + int initialHttpChannels = httpChannelTaskHandler.getNumChannels(); + int numChannels = randomIntBetween(1, 30); + int totalSearches = 0; + List<TestHttpChannel> channels = new ArrayList<>(numChannels); + for (int i = 0; i < numChannels; i++) { + TestHttpChannel channel = new TestHttpChannel(); + channels.add(channel); + int numTasks = randomIntBetween(1, 30); + totalSearches += numTasks; + for (int j = 0; j < numTasks; j++) { + httpChannelTaskHandler.execute(testClient, channel, new SearchRequest(), SearchAction.INSTANCE, null); + } + assertEquals(numTasks, httpChannelTaskHandler.httpChannels.get(channel).getNumTasks()); + } + assertEquals(initialHttpChannels + numChannels, httpChannelTaskHandler.getNumChannels()); + for (TestHttpChannel channel : channels) { + channel.awaitClose(); + } + assertEquals(initialHttpChannels, httpChannelTaskHandler.getNumChannels()); + assertEquals(totalSearches, testClient.searchRequests.get()); + assertEquals(totalSearches, testClient.cancelledTasks.size()); + } + } + + /** + * This test verifies what happens when a request comes through while its corresponding http channel is already closed. + * The close listener is executed straight away and the task gets cancelled. This can even happen multiple times; it is the + * only case where we may end up registering a close listener with the channel more than once, but since the channel is + * already closed, only the newly added listener will be invoked at registration time.
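+ * In other words, for an already closed channel every execute call is expected to register the task, cancel it and drop + * it straight away, leaving the number of tracked channels at its initial value once the call returns.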
+ */ + public void testChannelAlreadyClosed() { + try (TestClient testClient = new TestClient(Settings.EMPTY, threadPool, true)) { + HttpChannelTaskHandler httpChannelTaskHandler = HttpChannelTaskHandler.INSTANCE; + int initialHttpChannels = httpChannelTaskHandler.getNumChannels(); + int numChannels = randomIntBetween(1, 30); + int totalSearches = 0; + for (int i = 0; i < numChannels; i++) { + TestHttpChannel channel = new TestHttpChannel(); + // no need to wait here: no close listener has been registered yet, so there is nothing to wait for + channel.close(); + int numTasks = randomIntBetween(1, 5); + totalSearches += numTasks; + for (int j = 0; j < numTasks; j++) { + // here the channel is first registered, then straight away removed from the map as the close listener is invoked + httpChannelTaskHandler.execute(testClient, channel, new SearchRequest(), SearchAction.INSTANCE, null); + } + } + assertEquals(initialHttpChannels, httpChannelTaskHandler.getNumChannels()); + assertEquals(totalSearches, testClient.searchRequests.get()); + assertEquals(totalSearches, testClient.cancelledTasks.size()); + } + } + + private static class TestClient extends NodeClient { + private final AtomicLong counter = new AtomicLong(0); + private final Set<TaskId> cancelledTasks = new CopyOnWriteArraySet<>(); + private final AtomicInteger searchRequests = new AtomicInteger(0); + private final boolean timeout; + + TestClient(Settings settings, ThreadPool threadPool, boolean timeout) { + super(settings, threadPool); + this.timeout = timeout; + } + + @Override + public <Request extends ActionRequest, Response extends ActionResponse> Task executeLocally(ActionType<Response> action, + Request request, + ActionListener<Response> listener) { + switch (action.name()) { + case CancelTasksAction.NAME: + CancelTasksRequest cancelTasksRequest = (CancelTasksRequest) request; + assertTrue("tried to cancel the same task more than once", cancelledTasks.add(cancelTasksRequest.getTaskId())); + Task task = request.createTask(counter.getAndIncrement(), "cancel_task", action.name(), null, Collections.emptyMap()); + if (randomBoolean()) { + listener.onResponse(null); + } else { + // test that task cancellation is best effort: failures received are not propagated + listener.onFailure(new IllegalStateException()); + } + + return task; + case SearchAction.NAME: + searchRequests.incrementAndGet(); + Task searchTask = request.createTask(counter.getAndIncrement(), "search", action.name(), null, Collections.emptyMap()); + if (timeout == false) { + if (rarely()) { + // make sure that the response is sometimes also sent from the same thread before the task is returned + listener.onResponse(null); + } else { + threadPool().generic().submit(() -> listener.onResponse(null)); + } + } + return searchTask; + default: + throw new UnsupportedOperationException(); + } + + } + + @Override + public String getLocalNodeId() { + return "node"; + } + } + + private class TestHttpChannel implements HttpChannel { + private final AtomicBoolean open = new AtomicBoolean(true); + private final AtomicReference<ActionListener<Void>> closeListener = new AtomicReference<>(); + private final CountDownLatch closeLatch = new CountDownLatch(1); + + @Override + public void sendResponse(HttpResponse response, ActionListener<Void> listener) { + } + + @Override + public InetSocketAddress getLocalAddress() { + return null; + } + + @Override + public InetSocketAddress getRemoteAddress() { + return null; + } + + @Override + public void close() { + if (open.compareAndSet(true, false) == false) { + throw new IllegalStateException("channel already closed!"); + } + ActionListener<Void> listener = closeListener.get(); + if
(listener != null) { + boolean failure = randomBoolean(); + threadPool.generic().submit(() -> { + if (failure) { + listener.onFailure(new IllegalStateException()); + } else { + listener.onResponse(null); + } + closeLatch.countDown(); + }); + } + } + + private void awaitClose() throws InterruptedException { + close(); + closeLatch.await(); + } + + @Override + public boolean isOpen() { + return open.get(); + } + + @Override + public void addCloseListener(ActionListener listener) { + //if the channel is already closed, the listener gets notified immediately, from the same thread. + if (open.get() == false) { + listener.onResponse(null); + } else { + if (closeListener.compareAndSet(null, listener) == false) { + throw new IllegalStateException("close listener already set, only one is allowed!"); + } + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index e44d0f4e2de..2e7ef6b3823 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -20,7 +20,6 @@ package org.elasticsearch.snapshots; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.SnapshotsInProgress; -import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -72,17 +71,13 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { @After public void assertRepoConsistency() { if (skipRepoConsistencyCheckReason == null) { - client().admin().cluster().prepareGetRepositories().get().repositories() - .stream() - .map(RepositoryMetaData::name) - .forEach(name -> { - final List snapshots = client().admin().cluster().prepareGetSnapshots(name).get().getSnapshots(); - // Delete one random snapshot to trigger repository cleanup. 
- if (snapshots.isEmpty() == false) { - client().admin().cluster().prepareDeleteSnapshot(name, randomFrom(snapshots).snapshotId().getName()).get(); - } - BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); - }); + client().admin().cluster().prepareGetRepositories().get().repositories().forEach(repositoryMetaData -> { + final String name = repositoryMetaData.name(); + if (repositoryMetaData.settings().getAsBoolean("readonly", false) == false) { + client().admin().cluster().prepareCleanupRepository(name).get(); + } + BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); + }); } else { logger.info("--> skipped repo consistency checks because [{}]", skipRepoConsistencyCheckReason); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b86627e816d..8dbb2cbae42 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -488,13 +488,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest () -> client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap") .execute().actionGet()); - // TODO: Replace this by repository cleanup endpoint call once that's available - logger.info("--> Go through a loop of creating and deleting a snapshot to trigger repository cleanup"); - client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-tmp") - .setWaitForCompletion(true) - .setIndices("test-idx") - .get(); - client().admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-tmp").get(); + logger.info("--> Run repository cleanup to remove stale data from the repository"); + client().admin().cluster().prepareCleanupRepository("test-repo").get(); // Subtract four files that will remain in the repository: // (1) index-(N+1) diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index a95ec8265c9..257c7eb3437 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -903,7 +903,7 @@ public class SnapshotResiliencyTests extends ESTestCase { final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); final ActionFilters actionFilters = new ActionFilters(emptySet()); snapshotShardsService = new SnapshotShardsService( - settings, clusterService, snapshotsService, threadPool, + settings, clusterService, repositoriesService, threadPool, transportService, indicesService, actionFilters, indexNameExpressionResolver); final ShardStateAction shardStateAction = new ShardStateAction( clusterService, transportService, allocationService, diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index 73864cd75e7..c29eb9ad86b 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -19,11 +19,14 @@ package org.elasticsearch.snapshots; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import
org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import java.util.List; @@ -71,4 +74,38 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase { assertEquals(snStatus.getStats().getStartTime(), snapshotInfo.startTime()); assertEquals(snStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); } + + public void testStatusAPICallInProgressSnapshot() throws InterruptedException { + Client client = client(); + + logger.info("--> creating repository"); + assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("mock").setSettings( + Settings.builder().put("location", randomRepoPath()).put("block_on_data", true))); + + createIndex("test-idx-1"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index("test-idx-1", "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + logger.info("--> snapshot"); + ActionFuture createSnapshotResponseActionFuture = + client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute(); + + logger.info("--> wait for data nodes to get blocked"); + waitForBlockOnAnyDataNode("test-repo", TimeValue.timeValueMinutes(1)); + + final List snapshotStatus = client.admin().cluster().snapshotsStatus( + new SnapshotsStatusRequest("test-repo", new String[]{"test-snap"})).actionGet().getSnapshots(); + assertEquals(snapshotStatus.get(0).getState(), SnapshotsInProgress.State.STARTED); + + logger.info("--> unblock all data nodes"); + unblockAllDataNodes("test-repo"); + + logger.info("--> wait for snapshot to finish"); + createSnapshotResponseActionFuture.actionGet(); + } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index 15faecf46ca..c38ddb45ab8 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -21,6 +21,7 @@ package org.elasticsearch.snapshots.mockstore; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; import java.io.IOException; import java.io.InputStream; @@ -60,8 +61,8 @@ public class BlobContainerWrapper implements BlobContainer { } @Override - public void delete() throws IOException { - delegate.delete(); + public DeleteResult delete() throws IOException { + return delegate.delete(); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index bde2deaa642..9ea1079c3b8 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import 
org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -47,6 +48,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.stream.Collectors; @@ -219,13 +221,20 @@ public class MockEventuallyConsistentRepository extends BlobStoreRepository { } @Override - public void delete() { + public DeleteResult delete() { ensureNotClosed(); final String thisPath = path.buildAsString(); + final AtomicLong bytesDeleted = new AtomicLong(0L); + final AtomicLong blobsDeleted = new AtomicLong(0L); synchronized (context.actions) { consistentView(context.actions).stream().filter(action -> action.path.startsWith(thisPath)) - .forEach(a -> context.actions.add(new BlobStoreAction(Operation.DELETE, a.path))); + .forEach(a -> { + context.actions.add(new BlobStoreAction(Operation.DELETE, a.path)); + bytesDeleted.addAndGet(a.data.length); + blobsDeleted.incrementAndGet(); + }); } + return new DeleteResult(blobsDeleted.get(), bytesDeleted.get()); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index a552e7ac546..bd0a5cc772f 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Setting; @@ -330,14 +331,20 @@ public class MockRepository extends FsRepository { } @Override - public void delete() throws IOException { + public DeleteResult delete() throws IOException { + DeleteResult deleteResult = DeleteResult.ZERO; for (BlobContainer child : children().values()) { - child.delete(); + deleteResult = deleteResult.add(child.delete()); } - for (String blob : listBlobs().values().stream().map(BlobMetaData::name).collect(Collectors.toList())) { + final Map blobs = listBlobs(); + long deleteBlobCount = blobs.size(); + long deleteByteCount = 0L; + for (String blob : blobs.values().stream().map(BlobMetaData::name).collect(Collectors.toList())) { deleteBlobIgnoringIfNotExists(blob); + deleteByteCount += blobs.get(blob).length(); } blobStore().blobContainer(path().parent()).deleteBlob(path().toArray()[path().toArray().length - 1]); + return deleteResult.add(deleteBlobCount, deleteByteCount); } @Override diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java index d4e6f3693b7..3b297f9db6e 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java +++ 
b/server/src/test/java/org/elasticsearch/threadpool/ScalingThreadPoolTests.java @@ -54,7 +54,7 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { if (randomBoolean()) { final int processors = randomIntBetween(1, 64); maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, processors); - builder.put("processors", processors); + builder.put("node.processors", processors); processorsUsed = processors; } else { maxBasedOnNumberOfProcessors = expectedSize(threadPoolName, availableProcessors); @@ -99,7 +99,7 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase { }); if (processorsUsed > availableProcessors) { - assertWarnings("setting processors to value [" + processorsUsed + + assertWarnings("setting node.processors to value [" + processorsUsed + "] which is more than available processors [" + availableProcessors + "] is deprecated"); } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 2488551f7d6..de2c698b7bb 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -655,6 +655,7 @@ public class RemoteClusterConnectionTests extends ESTestCase { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/45845") public void testCloseWhileConcurrentlyConnecting() throws IOException, InterruptedException, BrokenBarrierException { List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); diff --git a/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java index 62b2b422f78..d1a2cbb9cc4 100644 --- a/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/bootstrap/ESElasticsearchCliTestCase.java @@ -28,7 +28,7 @@ import org.elasticsearch.test.ESTestCase; import java.nio.file.Path; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import static org.hamcrest.CoreMatchers.equalTo; @@ -41,7 +41,7 @@ abstract class ESElasticsearchCliTestCase extends ESTestCase { void runTest( final int expectedStatus, final boolean expectedInit, - final Consumer<String> outputConsumer, + final BiConsumer<String, String> outputConsumer, final InitConsumer initConsumer, final String... 
args) throws Exception { final MockTerminal terminal = new MockTerminal(); @@ -69,11 +69,12 @@ abstract class ESElasticsearchCliTestCase extends ESTestCase { }, terminal); assertThat(status, equalTo(expectedStatus)); assertThat(init.get(), equalTo(expectedInit)); - outputConsumer.accept(terminal.getOutput()); + outputConsumer.accept(terminal.getOutput(), terminal.getErrorOutput()); } catch (Exception e) { // if an unexpected exception is thrown, we log // terminal output to aid debugging - logger.info(terminal.getOutput()); + logger.info("Stdout:\n" + terminal.getOutput()); + logger.info("Stderr:\n" + terminal.getErrorOutput()); // rethrow so the test fails throw e; } diff --git a/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java b/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java index 44c968cf507..cff5c1b49fb 100644 --- a/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java +++ b/test/framework/src/main/java/org/elasticsearch/cli/MockTerminal.java @@ -33,8 +33,10 @@ import java.util.List; */ public class MockTerminal extends Terminal { - private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); - private final PrintWriter writer = new PrintWriter(new OutputStreamWriter(buffer, StandardCharsets.UTF_8)); + private final ByteArrayOutputStream stdoutBuffer = new ByteArrayOutputStream(); + private final ByteArrayOutputStream stderrBuffer = new ByteArrayOutputStream(); + private final PrintWriter writer = new PrintWriter(new OutputStreamWriter(stdoutBuffer, StandardCharsets.UTF_8)); + private final PrintWriter errorWriter = new PrintWriter(new OutputStreamWriter(stderrBuffer, StandardCharsets.UTF_8)); // A deque would be a perfect data structure for the FIFO queue of input values needed here. However, // to support the valid return value of readText being null (defined by Console), we need to be able @@ -73,6 +75,11 @@ public class MockTerminal extends Terminal { return writer; } + @Override + public PrintWriter getErrorWriter() { + return errorWriter; + } + /** Adds an input that will be returned from {@link #readText(String)}. Values are read in FIFO order. */ public void addTextInput(String input) { textInput.add(input); @@ -85,12 +92,18 @@ public class MockTerminal extends Terminal { /** Returns all output written to this terminal. */ public String getOutput() throws UnsupportedEncodingException { - return buffer.toString("UTF-8"); + return stdoutBuffer.toString("UTF-8"); + } + + /** Returns all error output written to this terminal. */ + public String getErrorOutput() throws UnsupportedEncodingException { + return stderrBuffer.toString("UTF-8"); } /** Wipes the input and output. 
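// Editor's sketch: with stdout and stderr captured in separate buffers, a CLI test can
// assert on each stream independently through the new BiConsumer hook. Only getWriter(),
// getErrorWriter(), getOutput() and getErrorOutput() come from the MockTerminal changes
// above; the messages are hypothetical.
MockTerminal terminal = new MockTerminal();
terminal.getWriter().println("server started");    // lands in stdoutBuffer
terminal.getErrorWriter().println("boom");         // lands in stderrBuffer
terminal.getWriter().flush();                      // the PrintWriters are not auto-flushing
terminal.getErrorWriter().flush();
assertTrue(terminal.getOutput().contains("server started"));
assertTrue(terminal.getErrorOutput().contains("boom"));
assertFalse(terminal.getOutput().contains("boom")); // the two streams no longer interleave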
*/ public void reset() { - buffer.reset(); + stdoutBuffer.reset(); + stderrBuffer.reset(); textIndex = 0; textInput.clear(); secretIndex = 0; diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java index b16a05f5b99..bcc961aaf03 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories; import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.elasticsearch.action.support.PlainActionFuture; @@ -208,30 +209,49 @@ public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeT .state(), equalTo(SnapshotState.SUCCESS)); - logger.info("--> creating a dangling index folder"); final BlobStoreRepository repo = (BlobStoreRepository) getInstanceFromNode(RepositoriesService.class).repository("test-repo"); - final PlainActionFuture future = PlainActionFuture.newFuture(); final Executor genericExec = repo.threadPool().executor(ThreadPool.Names.GENERIC); + + logger.info("--> creating a dangling index folder"); + + createDanglingIndex(repo, genericExec); + + logger.info("--> deleting a snapshot to trigger repository cleanup"); + client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest("test-repo", snapshotName)).actionGet(); + + assertConsistentRepository(repo, genericExec); + + logger.info("--> Create dangling index"); + createDanglingIndex(repo, genericExec); + + logger.info("--> Execute repository cleanup"); + final CleanupRepositoryResponse response = client().admin().cluster().prepareCleanupRepository("test-repo").get(); + assertCleanupResponse(response, 3L, 1L); + } + + protected void assertCleanupResponse(CleanupRepositoryResponse response, long bytes, long blobs) { + assertThat(response.result().blobs(), equalTo(1L + 2L)); + assertThat(response.result().bytes(), equalTo(3L + 2 * 3L)); + } + + private void createDanglingIndex(final BlobStoreRepository repo, final Executor genericExec) throws Exception { + final PlainActionFuture future = PlainActionFuture.newFuture(); genericExec.execute(new ActionRunnable(future) { @Override protected void doRun() throws Exception { final BlobStore blobStore = repo.blobStore(); blobStore.blobContainer(repo.basePath().add("indices").add("foo")) - .writeBlob("bar", new ByteArrayInputStream(new byte[0]), 0, false); + .writeBlob("bar", new ByteArrayInputStream(new byte[3]), 3, false); for (String prefix : Arrays.asList("snap-", "meta-")) { blobStore.blobContainer(repo.basePath()) - .writeBlob(prefix + "foo.dat", new ByteArrayInputStream(new byte[0]), 0, false); + .writeBlob(prefix + "foo.dat", new ByteArrayInputStream(new byte[3]), 3, false); } future.onResponse(null); } }); future.actionGet(); assertTrue(assertCorruptionVisible(repo, genericExec)); - logger.info("--> deleting a snapshot to trigger repository cleanup"); - client().admin().cluster().deleteSnapshot(new DeleteSnapshotRequest("test-repo", snapshotName)).actionGet(); - - assertConsistentRepository(repo, genericExec); } protected 
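// Editor's note on the expected totals above: createDanglingIndex(...) writes one 3-byte blob
// at indices/foo/bar plus two 3-byte root blobs (snap-foo.dat and meta-foo.dat), so cleaning
// up the dangling data should report 1 + 2 = 3 blobs and 3 + 2 * 3 = 9 bytes -- exactly the
// values hardcoded in assertCleanupResponse. The bytes/blobs parameters are ignored by this
// base implementation (note the call site passes (response, 3L, 1L)); presumably they exist
// so repository-specific subclasses can override with their own expected totals.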
boolean assertCorruptionVisible(BlobStoreRepository repo, Executor executor) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index b8428a23554..2754b1ff414 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -125,6 +125,7 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptMetaData; +import org.elasticsearch.rest.action.search.HttpChannelTaskHandler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.SearchHit; @@ -536,6 +537,9 @@ public abstract class ESIntegTestCase extends ESTestCase { restClient.close(); restClient = null; } + assertEquals(HttpChannelTaskHandler.INSTANCE.getNumChannels() + " channels still being tracked in " + + HttpChannelTaskHandler.class.getSimpleName() + " while there should be none", 0, + HttpChannelTaskHandler.INSTANCE.getNumChannels()); } private void afterInternal(boolean afterClass) throws Exception { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 9ae4b58ab98..a167419b9db 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -197,7 +197,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase { .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent()) .put(Node.NODE_NAME_SETTING.getKey(), nodeName) .put(ScriptService.SCRIPT_MAX_COMPILATIONS_RATE.getKey(), "1000/1m") - .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created + .put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created .put("transport.type", getTestTransportType()) .put(TransportSettings.PORT.getKey(), ESTestCase.getPortRange()) .put(Node.NODE_DATA_SETTING.getKey(), true) diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 90e82107bca..b04982ae92e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -470,7 +470,9 @@ public final class InternalTestCluster extends TestCluster { builder.put(SearchService.DEFAULT_KEEPALIVE_SETTING.getKey(), timeValueSeconds(100 + random.nextInt(5 * 60)).getStringRep()); } - builder.put(EsExecutors.PROCESSORS_SETTING.getKey(), 1 + random.nextInt(Math.min(4, Runtime.getRuntime().availableProcessors()))); + builder.put( + EsExecutors.NODE_PROCESSORS_SETTING.getKey(), + 1 + random.nextInt(Math.min(4, Runtime.getRuntime().availableProcessors()))); if (random.nextBoolean()) { if (random.nextBoolean()) { builder.put("indices.fielddata.cache.size", 1 + random.nextInt(1000), ByteSizeUnit.MB); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index b3840f96dcd..4513e5d98f7 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -190,7 +190,7 @@ public abstract class ESRestTestCase extends ESTestCase { } return cluster; } - + /** * Helper class to check warnings in REST responses with sensitivity to versions * used in the target cluster. @@ -199,14 +199,14 @@ public abstract class ESRestTestCase extends ESTestCase { Set requiredSameVersionClusterWarnings = new HashSet<>(); Set allowedWarnings = new HashSet<>(); final Set testNodeVersions; - + public VersionSensitiveWarningsHandler(Set nodeVersions) { this.testNodeVersions = nodeVersions; } /** * Adds to the set of warnings that are all required in responses if the cluster - * is formed from nodes all running the exact same version as the client. + * is formed from nodes all running the exact same version as the client. * @param requiredWarnings a set of required warnings */ public void current(String... requiredWarnings) { @@ -214,11 +214,11 @@ public abstract class ESRestTestCase extends ESTestCase { } /** - * Adds to the set of warnings that are permissible (but not required) when running + * Adds to the set of warnings that are permissible (but not required) when running * in mixed-version clusters or those that differ in version from the test client. * @param allowedWarnings optional warnings that will be ignored if received */ - public void compatible(String... allowedWarnings) { + public void compatible(String... allowedWarnings) { this.allowedWarnings.addAll(Arrays.asList(allowedWarnings)); } @@ -239,15 +239,15 @@ public abstract class ESRestTestCase extends ESTestCase { return false; } } - + private boolean isExclusivelyTargetingCurrentVersionCluster() { assertFalse("Node versions running in the cluster are missing", testNodeVersions.isEmpty()); - return testNodeVersions.size() == 1 && + return testNodeVersions.size() == 1 && testNodeVersions.iterator().next().equals(Version.CURRENT); - } - + } + } - + public static RequestOptions expectVersionSpecificWarnings(Consumer expectationsSetter) { Builder builder = RequestOptions.DEFAULT.toBuilder(); VersionSensitiveWarningsHandler warningsHandler = new VersionSensitiveWarningsHandler(nodeVersions); @@ -513,14 +513,7 @@ public abstract class ESRestTestCase extends ESTestCase { if (preserveIndicesUponCompletion() == false) { // wipe indices - try { - adminClient().performRequest(new Request("DELETE", "*")); - } catch (ResponseException e) { - // 404 here just means we had no indexes - if (e.getResponse().getStatusLine().getStatusCode() != 404) { - throw e; - } - } + wipeAllIndices(); } // wipe index templates @@ -563,6 +556,20 @@ public abstract class ESRestTestCase extends ESTestCase { assertThat("Found in progress snapshots [" + inProgressSnapshots.get() + "].", inProgressSnapshots.get(), anEmptyMap()); } + protected static void wipeAllIndices() throws IOException { + try { + final Response response = adminClient().performRequest(new Request("DELETE", "*")); + try (InputStream is = response.getEntity().getContent()) { + assertTrue((boolean) XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true).get("acknowledged")); + } + } catch (ResponseException e) { + // 404 here just means we had no indexes + if (e.getResponse().getStatusLine().getStatusCode() != 404) { + throw e; + } + } + } + /** * Wipe fs snapshots we created one by one and all repositories so that the next test can create the repositories fresh and they'll * start empty. 
There isn't an API to delete all snapshots. There is an API to delete all snapshot repositories but that leaves all of diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 7f9fae23361..ca69c101181 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1912,8 +1912,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { } public void testTimeoutPerConnection() throws IOException { - assumeTrue("Works only on BSD network stacks and apparently windows", - Constants.MAC_OS_X || Constants.FREE_BSD || Constants.WINDOWS); + assumeTrue("Works only on BSD network stacks", Constants.MAC_OS_X || Constants.FREE_BSD); try (ServerSocket socket = new MockServerSocket()) { // note - this test uses backlog=1 which is implementation specific ie. it might not work on some TCP/IP stacks // on linux (at least newer ones) the listen(addr, backlog=1) should just ignore new connections if the queue is full which diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java index 41e2605d9db..f4cc279aad7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/AbstractTransportGetResourcesAction.java @@ -80,6 +80,7 @@ public abstract class AbstractTransportGetResourcesAction implements XPackPlugin.XPackPersistentTaskParams { public static final String NAME = DataFrameField.TASK_NAME; - public static final ParseField VERSION = new ParseField(DataFrameField.VERSION); public static final ParseField FREQUENCY = DataFrameField.FREQUENCY; private final String transformId; @@ -36,7 +35,7 @@ public class DataFrameTransform extends AbstractDiffable imp static { PARSER.declareString(ConstructingObjectParser.constructorArg(), DataFrameField.ID); - PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), VERSION); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), DataFrameField.VERSION); PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FREQUENCY); } @@ -90,7 +89,7 @@ public class DataFrameTransform extends AbstractDiffable imp public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(DataFrameField.ID.getPreferredName(), transformId); - builder.field(VERSION.getPreferredName(), version); + builder.field(DataFrameField.VERSION.getPreferredName(), version); if (frequency != null) { builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java index fe31eaffbef..62865f5e1e5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/transforms/DataFrameTransformConfig.java @@ -47,8 +47,6 @@ public class DataFrameTransformConfig extends AbstractDiffable STRICT_PARSER = createParser(false); private static final ConstructingObjectParser LENIENT_PARSER = createParser(true); static final int MAX_DESCRIPTION_LENGTH = 1_000; @@ -98,8 +96,8 @@ public class DataFrameTransformConfig extends AbstractDiffable PivotConfig.fromXContent(p, lenient), PIVOT_TRANSFORM); parser.declareString(optionalConstructorArg(), DataFrameField.DESCRIPTION); parser.declareField(optionalConstructorArg(), - p -> TimeUtils.parseTimeFieldToInstant(p, CREATE_TIME.getPreferredName()), CREATE_TIME, ObjectParser.ValueType.VALUE); - parser.declareString(optionalConstructorArg(), VERSION); + p -> TimeUtils.parseTimeFieldToInstant(p, DataFrameField.CREATE_TIME.getPreferredName()), DataFrameField.CREATE_TIME, + ObjectParser.ValueType.VALUE); + parser.declareString(optionalConstructorArg(), DataFrameField.VERSION); return parser; } @@ -256,7 +255,7 @@ public class DataFrameTransformConfig extends AbstractDiffable { + public static final String NAME = "pinned"; + protected final QueryBuilder organicQuery; + protected final List ids; + protected static final ParseField IDS_FIELD = new ParseField("ids"); + protected static final ParseField ORGANIC_QUERY_FIELD = new ParseField("organic"); + + @Override + public String getWriteableName() { + return NAME; + } + + /** + * Creates a new PinnedQueryBuilder + */ + public PinnedQueryBuilder(QueryBuilder organicQuery, String... ids) { + if (organicQuery == null) { + throw new IllegalArgumentException("[" + NAME + "] organicQuery cannot be null"); + } + this.organicQuery = organicQuery; + if (ids == null) { + throw new IllegalArgumentException("[" + NAME + "] ids cannot be null"); + } + this.ids = new ArrayList<>(); + Collections.addAll(this.ids, ids); + + } + + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeStringCollection(this.ids); + out.writeNamedWriteable(organicQuery); + } + + /** + * @return the organic query set in the constructor + */ + public QueryBuilder organicQuery() { + return this.organicQuery; + } + + /** + * Returns the pinned ids for the query. 
+ */ + public List ids() { + return Collections.unmodifiableList(this.ids); + } + + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + if (organicQuery != null) { + builder.field(ORGANIC_QUERY_FIELD.getPreferredName()); + organicQuery.toXContent(builder, params); + } + builder.startArray(IDS_FIELD.getPreferredName()); + for (String value : ids) { + builder.value(value); + } + builder.endArray(); + printBoostAndQueryName(builder); + builder.endObject(); + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + throw new UnsupportedOperationException("Client side-only class for use in HLRC"); + } + + + @Override + protected int doHashCode() { + return Objects.hash(ids, organicQuery); + } + + @Override + protected boolean doEquals(PinnedQueryBuilder other) { + return Objects.equals(ids, other.ids) && Objects.equals(organicQuery, other.organicQuery) && boost == other.boost; + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java index 755d6faef0b..ca898f9cbf6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -27,16 +28,23 @@ public class MachineLearningFeatureSetUsage extends XPackFeatureSet.Usage { public static final String MODEL_SIZE = "model_size"; public static final String CREATED_BY = "created_by"; public static final String NODE_COUNT = "node_count"; + public static final String DATA_FRAME_ANALYTICS_JOBS_FIELD = "data_frame_analytics_jobs"; private final Map jobsUsage; private final Map datafeedsUsage; + private final Map analyticsUsage; private final int nodeCount; - public MachineLearningFeatureSetUsage(boolean available, boolean enabled, Map jobsUsage, - Map datafeedsUsage, int nodeCount) { + public MachineLearningFeatureSetUsage(boolean available, + boolean enabled, + Map jobsUsage, + Map datafeedsUsage, + Map analyticsUsage, + int nodeCount) { super(XPackField.MACHINE_LEARNING, available, enabled); this.jobsUsage = Objects.requireNonNull(jobsUsage); this.datafeedsUsage = Objects.requireNonNull(datafeedsUsage); + this.analyticsUsage = Objects.requireNonNull(analyticsUsage); this.nodeCount = nodeCount; } @@ -44,32 +52,37 @@ public class MachineLearningFeatureSetUsage extends XPackFeatureSet.Usage { super(in); this.jobsUsage = in.readMap(); this.datafeedsUsage = in.readMap(); + if (in.getVersion().onOrAfter(Version.V_7_4_0)) { + this.analyticsUsage = in.readMap(); + } else { + this.analyticsUsage = Collections.emptyMap(); + } if (in.getVersion().onOrAfter(Version.V_6_5_0)) { this.nodeCount = in.readInt(); } else { this.nodeCount = -1; } - } + } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeMap(jobsUsage); out.writeMap(datafeedsUsage); + if (out.getVersion().onOrAfter(Version.V_7_4_0)) { + out.writeMap(analyticsUsage); + } if (out.getVersion().onOrAfter(Version.V_6_5_0)) { out.writeInt(nodeCount); } - } + } @Override protected void 
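// Editor's sketch: using the client-side PinnedQueryBuilder above. doToQuery(...) deliberately
// throws, so the class only serializes a pinned query for the high-level REST client; the field
// and document ids here are hypothetical, and org.elasticsearch.index.query.QueryBuilders is
// assumed to be imported.
QueryBuilder organic = QueryBuilders.matchQuery("description", "wireless charger");
PinnedQueryBuilder pinned = new PinnedQueryBuilder(organic, "doc-1", "doc-7");
// doXContent(...) renders this roughly as:
// {"pinned": {"organic": {"match": {"description": ...}}, "ids": ["doc-1", "doc-7"]}}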
innerXContent(XContentBuilder builder, Params params) throws IOException { super.innerXContent(builder, params); - if (jobsUsage != null) { - builder.field(JOBS_FIELD, jobsUsage); - } - if (datafeedsUsage != null) { - builder.field(DATAFEEDS_FIELD, datafeedsUsage); - } + builder.field(JOBS_FIELD, jobsUsage); + builder.field(DATAFEEDS_FIELD, datafeedsUsage); + builder.field(DATA_FRAME_ANALYTICS_JOBS_FIELD, analyticsUsage); if (nodeCount >= 0) { builder.field(NODE_COUNT, nodeCount); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java index 04b5d084a76..b3b2a3b6666 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java @@ -5,12 +5,14 @@ */ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -20,14 +22,21 @@ import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.Evaluation; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.EvaluationMetricResult; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; +import org.elasticsearch.xpack.core.ml.utils.QueryProvider; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; public class EvaluateDataFrameAction extends ActionType { @@ -41,14 +50,20 @@ public class EvaluateDataFrameAction extends ActionType PARSER = new ConstructingObjectParser<>(NAME, - a -> new Request((List) a[0], (Evaluation) a[1])); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + NAME, + a -> new Request((List) a[0], (QueryProvider) a[1], (Evaluation) a[2])); static { - PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), INDEX); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> parseEvaluation(p), EVALUATION); + PARSER.declareStringArray(constructorArg(), INDEX); + PARSER.declareObject( + optionalConstructorArg(), + (p, c) -> QueryProvider.fromXContent(p, true, Messages.DATA_FRAME_ANALYTICS_BAD_QUERY_FORMAT), + QUERY); + PARSER.declareObject(constructorArg(), (p, c) -> parseEvaluation(p), EVALUATION); } private static Evaluation parseEvaluation(XContentParser parser) throws IOException { @@ -64,19 +79,25 @@ public class 
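// Editor's note: with the optional query declared on the parser above, an evaluate request
// body can carry a filter alongside the evaluation definition. A rough sketch (the exact
// JSON field names are assumptions here, since the ParseField declarations themselves are
// not visible in this hunk):
// {
//   "index": ["my-eval-data"],
//   "query": {"term": {"dataset": {"value": "test"}}},
//   "evaluation": {"regression": {...}}
// }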
EvaluateDataFrameAction extends ActionType indices, Evaluation evaluation) { + private Request(List indices, @Nullable QueryProvider queryProvider, Evaluation evaluation) { setIndices(indices); + setQueryProvider(queryProvider); setEvaluation(evaluation); } - public Request() { - } + public Request() {} public Request(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); + if (in.getVersion().onOrAfter(Version.V_7_4_0)) { + if (in.readBoolean()) { + queryProvider = QueryProvider.fromStream(in); + } + } evaluation = in.readNamedWriteable(Evaluation.class); } @@ -92,6 +113,14 @@ public class EvaluateDataFrameAction extends ActionType getQuery() { + // Visible for testing + Map getQuery() { return queryProvider.getQuery(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/Evaluation.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/Evaluation.java index c01c19e33e8..70f31273aba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/Evaluation.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/Evaluation.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import java.util.List; @@ -25,8 +26,9 @@ public interface Evaluation extends ToXContentObject, NamedWriteable { /** * Builds the search required to collect data to compute the evaluation result + * @param queryBuilder User-provided query that must be respected when collecting data */ - SearchSourceBuilder buildSearch(); + SearchSourceBuilder buildSearch(QueryBuilder queryBuilder); /** * Computes the evaluation result diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java index 610c065fd81..bb2540a8691 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -106,10 +107,11 @@ public class Regression implements Evaluation { } @Override - public SearchSourceBuilder buildSearch() { + public SearchSourceBuilder buildSearch(QueryBuilder queryBuilder) { BoolQueryBuilder boolQuery = QueryBuilders.boolQuery() .filter(QueryBuilders.existsQuery(actualField)) - .filter(QueryBuilders.existsQuery(predictedField)); + .filter(QueryBuilders.existsQuery(predictedField)) + .filter(queryBuilder); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0).query(boolQuery); for (RegressionMetric 
metric : metrics) { List aggs = metric.aggs(actualField, predictedField); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/BinarySoftClassification.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/BinarySoftClassification.java index f594e7598fc..20731eba5e8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/BinarySoftClassification.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/BinarySoftClassification.java @@ -155,10 +155,12 @@ public class BinarySoftClassification implements Evaluation { } @Override - public SearchSourceBuilder buildSearch() { - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.size(0); - searchSourceBuilder.query(buildQuery()); + public SearchSourceBuilder buildSearch(QueryBuilder queryBuilder) { + BoolQueryBuilder boolQuery = QueryBuilders.boolQuery() + .filter(QueryBuilders.existsQuery(actualField)) + .filter(QueryBuilders.existsQuery(predictedProbabilityField)) + .filter(queryBuilder); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(0).query(boolQuery); for (SoftClassificationMetric metric : metrics) { List aggs = metric.aggs(actualField, Collections.singletonList(new BinaryClassInfo())); aggs.forEach(searchSourceBuilder::aggregation); @@ -166,13 +168,6 @@ public class BinarySoftClassification implements Evaluation { return searchSourceBuilder; } - private QueryBuilder buildQuery() { - BoolQueryBuilder boolQuery = QueryBuilders.boolQuery(); - boolQuery.filter(QueryBuilders.existsQuery(actualField)); - boolQuery.filter(QueryBuilders.existsQuery(predictedProbabilityField)); - return boolQuery; - } - @Override public void evaluate(SearchResponse searchResponse, ActionListener> listener) { if (searchResponse.getHits().getTotalHits().value == 0) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStore.java index 8b0d4750c32..ed42ccab6e6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStore.java @@ -9,19 +9,21 @@ package org.elasticsearch.xpack.core.slm.history; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import java.io.IOException; -import java.time.Instant; -import 
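// Editor's note: both evaluations now route the user-supplied query through the same bool
// filter, so the search built by buildSearch(userQuery) has roughly this body (the actual
// and predicted field names come from the evaluation's constructor):
// {"size": 0,
//  "query": {"bool": {"filter": [
//      {"exists": {"field": <actualField>}},
//      {"exists": {"field": <predicted[Probability]Field>}},
//      <user-supplied query>]}},
//  "aggs": { ...one aggregation per metric... }}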
java.time.ZoneId; -import java.time.ZonedDateTime; import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING; import static org.elasticsearch.xpack.core.slm.history.SnapshotLifecycleTemplateRegistry.INDEX_TEMPLATE_VERSION; @@ -32,17 +34,17 @@ import static org.elasticsearch.xpack.core.slm.history.SnapshotLifecycleTemplate */ public class SnapshotHistoryStore { private static final Logger logger = LogManager.getLogger(SnapshotHistoryStore.class); - private static final DateFormatter indexTimeFormat = DateFormatter.forPattern("yyyy.MM"); public static final String SLM_HISTORY_INDEX_PREFIX = ".slm-history-" + INDEX_TEMPLATE_VERSION + "-"; + public static final String SLM_HISTORY_ALIAS = ".slm-history-" + INDEX_TEMPLATE_VERSION; private final Client client; - private final ZoneId timeZone; + private final ClusterService clusterService; private final boolean slmHistoryEnabled; - public SnapshotHistoryStore(Settings nodeSettings, Client client, ZoneId timeZone) { + public SnapshotHistoryStore(Settings nodeSettings, Client client, ClusterService clusterService) { this.client = client; - this.timeZone = timeZone; + this.clusterService = clusterService; slmHistoryEnabled = SLM_HISTORY_INDEX_ENABLED_SETTING.get(nodeSettings); } @@ -57,28 +59,84 @@ public class SnapshotHistoryStore { SLM_HISTORY_INDEX_ENABLED_SETTING.getKey(), item); return; } - final ZonedDateTime dateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(item.getTimestamp()), timeZone); - final String index = getHistoryIndexNameForTime(dateTime); - logger.trace("about to index snapshot history item in index [{}]: [{}]", index, item); - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - item.toXContent(builder, ToXContent.EMPTY_PARAMS); - IndexRequest request = new IndexRequest(index) - .source(builder); - client.index(request, ActionListener.wrap(indexResponse -> { - logger.debug("successfully indexed snapshot history item with id [{}] in index [{}]: [{}]", - indexResponse.getId(), index, item); - }, exception -> { + logger.trace("about to index snapshot history item in index [{}]: [{}]", SLM_HISTORY_ALIAS, item); + ensureHistoryIndex(client, clusterService.state(), ActionListener.wrap(createdIndex -> { + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + item.toXContent(builder, ToXContent.EMPTY_PARAMS); + IndexRequest request = new IndexRequest(SLM_HISTORY_ALIAS) + .source(builder); + client.index(request, ActionListener.wrap(indexResponse -> { + logger.debug("successfully indexed snapshot history item with id [{}] in index [{}]: [{}]", + indexResponse.getId(), SLM_HISTORY_ALIAS, item); + }, exception -> { + logger.error(new ParameterizedMessage("failed to index snapshot history item in index [{}]: [{}]", + SLM_HISTORY_ALIAS, item), exception); + })); + } catch (IOException exception) { logger.error(new ParameterizedMessage("failed to index snapshot history item in index [{}]: [{}]", - index, item), exception); - })); - } catch (IOException exception) { - logger.error(new ParameterizedMessage("failed to index snapshot history item in index [{}]: [{}]", - index, item), exception); + SLM_HISTORY_ALIAS, item), exception); + } + }, ex -> logger.error(new ParameterizedMessage("failed to ensure SLM history index exists, not indexing history item [{}]", + item), ex))); + } + + /** + * Checks if the SLM history index exists, and if not, creates it. 
+ * + * @param client The client to use to create the index if needed + * @param state The current cluster state, to determine if the alias exists + * @param andThen Called after the index has been created. `onResponse` called with `true` if the index was created, + * `false` if it already existed. + */ + static void ensureHistoryIndex(Client client, ClusterState state, ActionListener<Boolean> andThen) { + final String initialHistoryIndexName = SLM_HISTORY_INDEX_PREFIX + "000001"; + final AliasOrIndex slmHistory = state.metaData().getAliasAndIndexLookup().get(SLM_HISTORY_ALIAS); + final AliasOrIndex initialHistoryIndex = state.metaData().getAliasAndIndexLookup().get(initialHistoryIndexName); + + if (slmHistory == null && initialHistoryIndex == null) { + // No alias or index exists with the expected names, so create the index with appropriate alias + client.admin().indices().prepareCreate(initialHistoryIndexName) + .setWaitForActiveShards(1) + .addAlias(new Alias(SLM_HISTORY_ALIAS) + .writeIndex(true)) + .execute(new ActionListener<CreateIndexResponse>() { + @Override + public void onResponse(CreateIndexResponse response) { + andThen.onResponse(true); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof ResourceAlreadyExistsException) { + // The index didn't exist before we made the call, there was probably a race - just ignore this + logger.debug("index [{}] was created after checking for its existence, likely due to a concurrent call", + initialHistoryIndexName); + andThen.onResponse(false); + } else { + andThen.onFailure(e); + } + } + }); + } else if (slmHistory == null) { + // alias does not exist but initial index does, something is broken + andThen.onFailure(new IllegalStateException("SLM history index [" + initialHistoryIndexName + + "] already exists but does not have alias [" + SLM_HISTORY_ALIAS + "]")); + } else if (slmHistory.isAlias() && slmHistory instanceof AliasOrIndex.Alias) { + if (((AliasOrIndex.Alias) slmHistory).getWriteIndex() != null) { + // The alias exists and has a write index, so we're good + andThen.onResponse(false); + } else { + // The alias does not have a write index, so we can't index into it + andThen.onFailure(new IllegalStateException("SLM history alias [" + SLM_HISTORY_ALIAS + "] does not have a write index")); + } + } else if (slmHistory.isAlias() == false) { + // This is not an alias, error out + andThen.onFailure(new IllegalStateException("SLM history alias [" + SLM_HISTORY_ALIAS + + "] already exists as concrete index")); + } else { + logger.error("unexpected AliasOrIndex for [{}]: [{}]", SLM_HISTORY_ALIAS, slmHistory); + // (slmHistory.isAlias() == true) but (slmHistory instanceof Alias == false)? 
+ assert false : SLM_HISTORY_ALIAS + " cannot be both an alias and not an alias simultaneously"; } } - - - static String getHistoryIndexNameForTime(ZonedDateTime time) { - return SLM_HISTORY_INDEX_PREFIX + indexTimeFormat.format(time); - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java index 539205e251f..3a9e9892d08 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java @@ -19,6 +19,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.common.socket.SocketAccess; import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo; +import org.elasticsearch.xpack.core.watcher.WatcherField; import javax.net.ssl.HostnameVerifier; import javax.net.ssl.KeyManagerFactory; @@ -420,6 +421,7 @@ public class SSLService { sslSettingsMap.put("xpack.http.ssl", settings.getByPrefix("xpack.http.ssl.")); sslSettingsMap.putAll(getRealmsSSLSettings(settings)); sslSettingsMap.putAll(getMonitoringExporterSettings(settings)); + sslSettingsMap.put(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX, settings.getByPrefix(WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX)); sslSettingsMap.forEach((key, sslSettings) -> loadConfiguration(key, sslSettings, sslContextHolders)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherField.java index b7ad6ee423d..4a8a7d39e0d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherField.java @@ -15,5 +15,7 @@ public final class WatcherField { public static final Setting<InputStream> ENCRYPTION_KEY_SETTING = SecureSetting.secureFile("xpack.watcher.encryption_key", null); + public static final String EMAIL_NOTIFICATION_SSL_PREFIX = "xpack.notification.email.ssl."; + private WatcherField() {} } diff --git a/x-pack/plugin/core/src/main/resources/security-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-index-template-7.json index dae6462b7a6..8b4eed3bb1e 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template-7.json @@ -6,6 +6,7 @@ "number_of_replicas" : 0, "auto_expand_replicas" : "0-1", "index.priority": 1000, + "index.refresh_interval": "1s", "index.format": 6, "analysis" : { "filter" : { diff --git a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json index 312d9ff9e3f..502daae3f79 100644 --- a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json @@ -6,6 +6,7 @@ "number_of_replicas" : 0, "auto_expand_replicas" : "0-1", "index.priority": 1000, + "index.refresh_interval": "1s", "index.format": 7 }, "mappings" : { diff --git a/x-pack/plugin/core/src/main/resources/slm-history-ilm-policy.json b/x-pack/plugin/core/src/main/resources/slm-history-ilm-policy.json index 8bccc4d23cb..febae00bc36 100644 --- a/x-pack/plugin/core/src/main/resources/slm-history-ilm-policy.json +++ 
b/x-pack/plugin/core/src/main/resources/slm-history-ilm-policy.json @@ -1,7 +1,15 @@ { "phases": { + "hot": { + "actions": { + "rollover": { + "max_size": "50GB", + "max_age": "30d" + } + } + }, "delete": { - "min_age": "60d", + "min_age": "90d", "actions": { "delete": {} } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameActionRequestTests.java index e93eb9b2013..77bb6f30e20 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameActionRequestTests.java @@ -7,26 +7,41 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.action.EvaluateDataFrameAction.Request; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.Evaluation; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.dataframe.evaluation.regression.RegressionTests; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.softclassification.BinarySoftClassificationTests; +import org.elasticsearch.xpack.core.ml.utils.QueryProvider; +import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; public class EvaluateDataFrameActionRequestTests extends AbstractSerializingTestCase { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { - return new NamedWriteableRegistry(new MlEvaluationNamedXContentProvider().getNamedWriteables()); + List namedWriteables = new ArrayList<>(); + namedWriteables.addAll(new MlEvaluationNamedXContentProvider().getNamedWriteables()); + namedWriteables.addAll(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedWriteables()); + return new NamedWriteableRegistry(namedWriteables); } @Override protected NamedXContentRegistry xContentRegistry() { - return new NamedXContentRegistry(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + List namedXContent = new ArrayList<>(); + namedXContent.addAll(new MlEvaluationNamedXContentProvider().getNamedXContentParsers()); + namedXContent.addAll(new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents()); + return new NamedXContentRegistry(namedXContent); } @Override @@ -38,7 +53,18 @@ public class EvaluateDataFrameActionRequestTests extends AbstractSerializingTest indices.add(randomAlphaOfLength(10)); } request.setIndices(indices); - request.setEvaluation(BinarySoftClassificationTests.createRandom()); + QueryProvider queryProvider = null; + if (randomBoolean()) { + try { + queryProvider = QueryProvider.fromParsedQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10))); + } catch (IOException e) { + // Should never happen + throw new UncheckedIOException(e); + } + } + 
request.setQueryProvider(queryProvider); + Evaluation evaluation = randomBoolean() ? BinarySoftClassificationTests.createRandom() : RegressionTests.createRandom(); + request.setEvaluation(evaluation); return request; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionTests.java index d0bcc1a11f4..7f089ab18cd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/RegressionTests.java @@ -10,11 +10,14 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -69,4 +72,20 @@ public class RegressionTests extends AbstractSerializingTestCase { () -> new Regression("foo", "bar", Collections.emptyList())); assertThat(e.getMessage(), equalTo("[regression] must have one or more metrics")); } + + public void testBuildSearch() { + Regression evaluation = new Regression("act", "prob", Arrays.asList(new MeanSquaredError())); + QueryBuilder userProvidedQuery = + QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("field_A", "some-value")) + .filter(QueryBuilders.termQuery("field_B", "some-other-value")); + QueryBuilder expectedSearchQuery = + QueryBuilders.boolQuery() + .filter(QueryBuilders.existsQuery("act")) + .filter(QueryBuilders.existsQuery("prob")) + .filter(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("field_A", "some-value")) + .filter(QueryBuilders.termQuery("field_B", "some-other-value"))); + assertThat(evaluation.buildSearch(userProvidedQuery).query(), equalTo(expectedSearchQuery)); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java index 4f17df35367..6a589c0d055 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/softclassification/BinarySoftClassificationTests.java @@ -10,11 +10,14 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; import 
java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -76,4 +79,20 @@ public class BinarySoftClassificationTests extends AbstractSerializingTestCase new BinarySoftClassification("foo", "bar", Collections.emptyList())); assertThat(e.getMessage(), equalTo("[binary_soft_classification] must have one or more metrics")); } + + public void testBuildSearch() { + BinarySoftClassification evaluation = new BinarySoftClassification("act", "prob", Arrays.asList(new Precision(Arrays.asList(0.7)))); + QueryBuilder userProvidedQuery = + QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("field_A", "some-value")) + .filter(QueryBuilders.termQuery("field_B", "some-other-value")); + QueryBuilder expectedSearchQuery = + QueryBuilders.boolQuery() + .filter(QueryBuilders.existsQuery("act")) + .filter(QueryBuilders.existsQuery("prob")) + .filter(QueryBuilders.boolQuery() + .filter(QueryBuilders.termQuery("field_A", "some-value")) + .filter(QueryBuilders.termQuery("field_B", "some-other-value"))); + assertThat(evaluation.buildSearch(userProvidedQuery).query(), equalTo(expectedSearchQuery)); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java index aea24f0fdb1..59dd546cba2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java @@ -6,28 +6,45 @@ package org.elasticsearch.xpack.core.slm.history; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexAction; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.junit.After; +import org.junit.Assert; import org.junit.Before; -import java.time.Instant; -import java.time.ZoneOffset; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING; -import static 
org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore.getHistoryIndexNameForTime; -import static org.elasticsearch.xpack.core.slm.history.SnapshotLifecycleTemplateRegistry.INDEX_TEMPLATE_VERSION; +import static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore.SLM_HISTORY_ALIAS; +import static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore.SLM_HISTORY_INDEX_PREFIX; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.core.IsEqual.equalTo; @@ -41,7 +58,8 @@ public class SnapshotHistoryStoreTests extends ESTestCase { public void setup() { threadPool = new TestThreadPool(this.getClass().getName()); client = new SnapshotLifecycleTemplateRegistryTests.VerifyingClient(threadPool); - historyStore = new SnapshotHistoryStore(Settings.EMPTY, client, ZoneOffset.UTC); + ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); + historyStore = new SnapshotHistoryStore(Settings.EMPTY, client, clusterService); } @After @@ -53,7 +71,7 @@ public class SnapshotHistoryStoreTests extends ESTestCase { public void testNoActionIfDisabled() { Settings settings = Settings.builder().put(SLM_HISTORY_INDEX_ENABLED_SETTING.getKey(), false).build(); - SnapshotHistoryStore disabledHistoryStore = new SnapshotHistoryStore(settings, client, ZoneOffset.UTC); + SnapshotHistoryStore disabledHistoryStore = new SnapshotHistoryStore(settings, client, null); String policyId = randomAlphaOfLength(5); SnapshotLifecyclePolicy policy = randomSnapshotLifecyclePolicy(policyId); final long timestamp = randomNonNegativeLong(); @@ -61,7 +79,7 @@ public class SnapshotHistoryStoreTests extends ESTestCase { String snapshotId = policy.generateSnapshotName(context); SnapshotHistoryItem record = SnapshotHistoryItem.successRecord(timestamp, policy, snapshotId); - client.setVerifier((a,r,l) -> { + client.setVerifier((a, r, l) -> { fail("the history store is disabled, no action should have been taken"); return null; }); @@ -80,11 +98,14 @@ public class SnapshotHistoryStoreTests extends ESTestCase { AtomicInteger calledTimes = new AtomicInteger(0); client.setVerifier((action, request, listener) -> { + if (action instanceof CreateIndexAction && request instanceof CreateIndexRequest) { + return new CreateIndexResponse(true, true, ((CreateIndexRequest) request).index()); + } calledTimes.incrementAndGet(); assertThat(action, instanceOf(IndexAction.class)); assertThat(request, instanceOf(IndexRequest.class)); IndexRequest indexRequest = (IndexRequest) request; - assertEquals(getHistoryIndexNameForTime(Instant.ofEpochMilli(timestamp).atZone(ZoneOffset.UTC)), indexRequest.index()); + assertEquals(SLM_HISTORY_ALIAS, indexRequest.index()); final String indexedDocument = indexRequest.source().utf8ToString(); assertThat(indexedDocument, containsString(policy.getId())); assertThat(indexedDocument, containsString(policy.getRepository())); @@ -98,9 +119,9 @@ public class SnapshotHistoryStoreTests extends ESTestCase { new ShardId(randomAlphaOfLength(5), randomAlphaOfLength(5), randomInt(100)), randomAlphaOfLength(5), randomAlphaOfLength(5), - randomLongBetween(1,1000), - randomLongBetween(1,1000), - randomLongBetween(1,1000), + randomLongBetween(1, 1000), + randomLongBetween(1, 1000), + randomLongBetween(1, 1000), randomBoolean()); }); @@ -115,11 +136,14 @@ public class SnapshotHistoryStoreTests extends ESTestCase { AtomicInteger calledTimes = new AtomicInteger(0); 
client.setVerifier((action, request, listener) -> { + if (action instanceof CreateIndexAction && request instanceof CreateIndexRequest) { + return new CreateIndexResponse(true, true, ((CreateIndexRequest) request).index()); + } calledTimes.incrementAndGet(); assertThat(action, instanceOf(IndexAction.class)); assertThat(request, instanceOf(IndexRequest.class)); IndexRequest indexRequest = (IndexRequest) request; - assertEquals(getHistoryIndexNameForTime(Instant.ofEpochMilli(timestamp).atZone(ZoneOffset.UTC)), indexRequest.index()); + assertEquals(SLM_HISTORY_ALIAS, indexRequest.index()); final String indexedDocument = indexRequest.source().utf8ToString(); assertThat(indexedDocument, containsString(policy.getId())); assertThat(indexedDocument, containsString(policy.getRepository())); @@ -135,9 +159,9 @@ public class SnapshotHistoryStoreTests extends ESTestCase { new ShardId(randomAlphaOfLength(5), randomAlphaOfLength(5), randomInt(100)), randomAlphaOfLength(5), randomAlphaOfLength(5), - randomLongBetween(1,1000), - randomLongBetween(1,1000), - randomLongBetween(1,1000), + randomLongBetween(1, 1000), + randomLongBetween(1, 1000), + randomLongBetween(1, 1000), randomBoolean()); }); @@ -146,13 +170,188 @@ public class SnapshotHistoryStoreTests extends ESTestCase { } } + public void testHistoryIndexNeedsCreation() throws InterruptedException { + ClusterState state = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) + .metaData(MetaData.builder()) + .build(); + + client.setVerifier((a, r, l) -> { + assertThat(a, instanceOf(CreateIndexAction.class)); + assertThat(r, instanceOf(CreateIndexRequest.class)); + CreateIndexRequest request = (CreateIndexRequest) r; + assertThat(request.aliases(), hasSize(1)); + request.aliases().forEach(alias -> { + assertThat(alias.name(), equalTo(SLM_HISTORY_ALIAS)); + assertTrue(alias.writeIndex()); + }); + return new CreateIndexResponse(true, true, request.index()); + }); + + CountDownLatch latch = new CountDownLatch(1); + SnapshotHistoryStore.ensureHistoryIndex(client, state, new LatchedActionListener<>(ActionListener.wrap( + Assert::assertTrue, + ex -> { + logger.error(ex); + fail("should have called onResponse, not onFailure"); + }), latch)); + + awaitLatch(latch, 10, TimeUnit.SECONDS); + } + + public void testHistoryIndexProperlyExistsAlready() throws InterruptedException { + ClusterState state = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) + .metaData(MetaData.builder() + .put(IndexMetaData.builder(SLM_HISTORY_INDEX_PREFIX + "000001") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(randomIntBetween(1,10)) + .numberOfReplicas(randomIntBetween(1,10)) + .putAlias(AliasMetaData.builder(SLM_HISTORY_ALIAS) + .writeIndex(true) + .build()))) + .build(); + + client.setVerifier((a, r, l) -> { + fail("no client calls should have been made"); + return null; + }); + + CountDownLatch latch = new CountDownLatch(1); + SnapshotHistoryStore.ensureHistoryIndex(client, state, new LatchedActionListener<>(ActionListener.wrap( + Assert::assertFalse, + ex -> { + logger.error(ex); + fail("should have called onResponse, not onFailure"); + }), latch)); + + awaitLatch(latch, 10, TimeUnit.SECONDS); + } + + public void testHistoryIndexHasNoWriteIndex() throws InterruptedException { + ClusterState state = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) + .metaData(MetaData.builder() + .put(IndexMetaData.builder(SLM_HISTORY_INDEX_PREFIX + "000001") + 
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 10)) + .numberOfReplicas(randomIntBetween(1, 10)) + .putAlias(AliasMetaData.builder(SLM_HISTORY_ALIAS) + .build())) + .put(IndexMetaData.builder(randomAlphaOfLength(5)) + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 10)) + .numberOfReplicas(randomIntBetween(1, 10)) + .putAlias(AliasMetaData.builder(SLM_HISTORY_ALIAS) + .build()))) + .build(); + + client.setVerifier((a, r, l) -> { + fail("no client calls should have been made"); + return null; + }); + + CountDownLatch latch = new CountDownLatch(1); + SnapshotHistoryStore.ensureHistoryIndex(client, state, new LatchedActionListener<>(ActionListener.wrap( + indexCreated -> fail("should have called onFailure, not onResponse"), + ex -> { + assertThat(ex, instanceOf(IllegalStateException.class)); + assertThat(ex.getMessage(), containsString("SLM history alias [" + SLM_HISTORY_ALIAS + + "] does not have a write index")); + }), latch)); + + awaitLatch(latch, 10, TimeUnit.SECONDS); + } + + public void testHistoryIndexNotAlias() throws InterruptedException { + ClusterState state = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) + .metaData(MetaData.builder() + .put(IndexMetaData.builder(SLM_HISTORY_ALIAS) + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 10)) + .numberOfReplicas(randomIntBetween(1, 10)))) + .build(); + + client.setVerifier((a, r, l) -> { + fail("no client calls should have been made"); + return null; + }); + + CountDownLatch latch = new CountDownLatch(1); + SnapshotHistoryStore.ensureHistoryIndex(client, state, new LatchedActionListener<>(ActionListener.wrap( + indexCreated -> fail("should have called onFailure, not onResponse"), + ex -> { + assertThat(ex, instanceOf(IllegalStateException.class)); + assertThat(ex.getMessage(), containsString("SLM history alias [" + SLM_HISTORY_ALIAS + + "] already exists as concrete index")); + }), latch)); + + awaitLatch(latch, 10, TimeUnit.SECONDS); + } + + public void testHistoryIndexCreatedConcurrently() throws InterruptedException { + ClusterState state = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) + .metaData(MetaData.builder()) + .build(); + + client.setVerifier((a, r, l) -> { + assertThat(a, instanceOf(CreateIndexAction.class)); + assertThat(r, instanceOf(CreateIndexRequest.class)); + CreateIndexRequest request = (CreateIndexRequest) r; + assertThat(request.aliases(), hasSize(1)); + request.aliases().forEach(alias -> { + assertThat(alias.name(), equalTo(SLM_HISTORY_ALIAS)); + assertTrue(alias.writeIndex()); + }); + throw new ResourceAlreadyExistsException("that index already exists"); + }); + + CountDownLatch latch = new CountDownLatch(1); + SnapshotHistoryStore.ensureHistoryIndex(client, state, new LatchedActionListener<>(ActionListener.wrap( + Assert::assertFalse, + ex -> { + logger.error(ex); + fail("should have called onResponse, not onFailure"); + }), latch)); + + awaitLatch(latch, 10, TimeUnit.SECONDS); + } + + public void testHistoryAliasDoesntExistButIndexDoes() throws InterruptedException { + final String initialIndex = SLM_HISTORY_INDEX_PREFIX + "000001"; + ClusterState state = ClusterState.builder(new ClusterName(randomAlphaOfLength(5))) + .metaData(MetaData.builder() + .put(IndexMetaData.builder(initialIndex) +
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(randomIntBetween(1,10)) + .numberOfReplicas(randomIntBetween(1,10)))) + .build(); + + client.setVerifier((a, r, l) -> { + fail("no client calls should have been made"); + return null; + }); + + CountDownLatch latch = new CountDownLatch(1); + SnapshotHistoryStore.ensureHistoryIndex(client, state, new LatchedActionListener<>(ActionListener.wrap( + response -> { + logger.error(response); + fail("should have called onFailure, not onResponse"); + }, + ex -> { + assertThat(ex, instanceOf(IllegalStateException.class)); + assertThat(ex.getMessage(), containsString("SLM history index [" + initialIndex + + "] already exists but does not have alias [" + SLM_HISTORY_ALIAS + "]")); + }), latch)); + + awaitLatch(latch, 10, TimeUnit.SECONDS); + } + @SuppressWarnings("unchecked") private void assertContainsMap(String indexedDocument, Map map) { map.forEach((k, v) -> { assertThat(indexedDocument, containsString(k)); if (v instanceof Map) { assertContainsMap(indexedDocument, (Map) v); - } if (v instanceof Iterable) { + } + if (v instanceof Iterable) { ((Iterable) v).forEach(elem -> { assertThat(indexedDocument, containsString(elem.toString())); }); @@ -162,19 +361,6 @@ public class SnapshotHistoryStoreTests extends ESTestCase { }); } - - public void testIndexNameGeneration() { - String indexTemplateVersion = INDEX_TEMPLATE_VERSION; - assertThat(getHistoryIndexNameForTime(Instant.ofEpochMilli((long) 0).atZone(ZoneOffset.UTC)), - equalTo(".slm-history-"+ indexTemplateVersion +"-1970.01")); - assertThat(getHistoryIndexNameForTime(Instant.ofEpochMilli(100000000000L).atZone(ZoneOffset.UTC)), - equalTo(".slm-history-" + indexTemplateVersion + "-1973.03")); - assertThat(getHistoryIndexNameForTime(Instant.ofEpochMilli(1416582852000L).atZone(ZoneOffset.UTC)), - equalTo(".slm-history-" + indexTemplateVersion + "-2014.11")); - assertThat(getHistoryIndexNameForTime(Instant.ofEpochMilli(2833165811000L).atZone(ZoneOffset.UTC)), - equalTo(".slm-history-" + indexTemplateVersion + "-2059.10")); - } - public static SnapshotLifecyclePolicy randomSnapshotLifecyclePolicy(String id) { Map config = null; if (randomBoolean()) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotLifecycleTemplateRegistryTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotLifecycleTemplateRegistryTests.java index 7c116234471..02715c46926 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotLifecycleTemplateRegistryTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotLifecycleTemplateRegistryTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.ilm.LifecycleType; import org.elasticsearch.xpack.core.ilm.OperationMode; +import org.elasticsearch.xpack.core.ilm.RolloverAction; import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction; import org.junit.After; @@ -85,6 +86,7 @@ public class SnapshotLifecycleTemplateRegistryTests extends ESTestCase { entries.addAll(Arrays.asList( new NamedXContentRegistry.Entry(LifecycleType.class, new ParseField(TimeseriesLifecycleType.TYPE), (p) -> TimeseriesLifecycleType.INSTANCE), + new NamedXContentRegistry.Entry(LifecycleAction.class, new 
ParseField(RolloverAction.NAME), RolloverAction::parse), new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse))); xContentRegistry = new NamedXContentRegistry(entries); registry = new SnapshotLifecycleTemplateRegistry(Settings.EMPTY, clusterService, threadPool, client, xContentRegistry); @@ -276,7 +278,11 @@ public class SnapshotLifecycleTemplateRegistryTests extends ESTestCase { protected void doExecute(ActionType action, Request request, ActionListener listener) { - listener.onResponse((Response) verifier.apply(action, request, listener)); + try { + listener.onResponse((Response) verifier.apply(action, request, listener)); + } catch (Exception e) { + listener.onFailure(e); + } } public VerifyingClient setVerifier(TriFunction, ActionRequest, ActionListener, ActionResponse> verifier) { diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java index 681599331c8..499f62f13ea 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameConfigurationIndexIT.java @@ -42,13 +42,13 @@ public class DataFrameConfigurationIndexIT extends DataFrameRestTestCase { builder.endObject(); final StringEntity entity = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); Request req = new Request("PUT", - DataFrameInternalIndex.INDEX_NAME + "/_doc/" + DataFrameTransformConfig.documentId(fakeTransformName)); + DataFrameInternalIndex.LATEST_INDEX_NAME + "/_doc/" + DataFrameTransformConfig.documentId(fakeTransformName)); req.setEntity(entity); client().performRequest(req); } // refresh the index - assertOK(client().performRequest(new Request("POST", DataFrameInternalIndex.INDEX_NAME + "/_refresh"))); + assertOK(client().performRequest(new Request("POST", DataFrameInternalIndex.LATEST_INDEX_NAME + "/_refresh"))); Request deleteRequest = new Request("DELETE", DATAFRAME_ENDPOINT + fakeTransformName); Response deleteResponse = client().performRequest(deleteRequest); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 09a6f1ee56a..455009b4969 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -355,7 +355,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { public static void removeIndices() throws Exception { // we might have disabled wiping indices, but now it's time to get rid of them // note: cannot use super.cleanUpCluster() as this method must be static - wipeIndices(); + wipeAllIndices(); } public void wipeDataFrameTransforms() throws IOException { @@ -385,7 +385,7 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { assertTrue(transformConfigs.isEmpty()); // the configuration index
should be empty - Request request = new Request("GET", DataFrameInternalIndex.INDEX_NAME + "/_search"); + Request request = new Request("GET", DataFrameInternalIndex.LATEST_INDEX_NAME + "/_search"); try { Response searchResponse = adminClient().performRequest(request); Map searchResult = entityAsMap(searchResponse); @@ -403,17 +403,6 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { waitForPendingTasks(adminClient(), taskName -> taskName.startsWith(DataFrameField.TASK_NAME) == false); } - protected static void wipeIndices() throws IOException { - try { - adminClient().performRequest(new Request("DELETE", "*")); - } catch (ResponseException e) { - // 404 here just means we had no indexes - if (e.getResponse().getStatusLine().getStatusCode() != 404) { - throw e; - } - } - } - static int getDataFrameCheckpoint(String transformId) throws IOException { Response statsResponse = client().performRequest(new Request("GET", DATAFRAME_ENDPOINT + transformId + "/_stats")); diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformInternalIndexIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformInternalIndexIT.java new file mode 100644 index 00000000000..a604062ce4a --- /dev/null +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTransformInternalIndexIT.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.dataframe.integration; + +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.UpdateDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.UpdateDataFrameTransformResponse; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigUpdate; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; +import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.indices.CreateIndexRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Collections; + +import static org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex.addDataFrameTransformsConfigMappings; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.equalTo; + + +public class DataFrameTransformInternalIndexIT extends ESRestTestCase { + + + private static final String CURRENT_INDEX = DataFrameInternalIndex.LATEST_INDEX_NAME; + private static final String OLD_INDEX = DataFrameInternalIndex.INDEX_PATTERN + "1"; + + + public void testUpdateDeletesOldTransformConfig() throws Exception { + TestRestHighLevelClient client = new TestRestHighLevelClient(); + // The mapping does not need to actually be the "OLD" mapping, we are testing that the old doc gets deleted, and the new one + // created. 
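+ // create the older versioned index (OLD_INDEX, i.e. ".data-frame-internal-1") by hand; the update further down is expected to write the config into CURRENT_INDEX and delete this copy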
+ try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + builder.startObject(); + builder.startObject("properties"); + builder.startObject(DataFrameField.INDEX_DOC_TYPE.getPreferredName()).field("type", "keyword").endObject(); + addDataFrameTransformsConfigMappings(builder); + builder.endObject(); + builder.endObject(); + client.indices().create(new CreateIndexRequest(OLD_INDEX).mapping(builder), RequestOptions.DEFAULT); + } + String transformIndex = "transform-index-deletes-old"; + createSourceIndex(transformIndex); + String transformId = "transform-update-deletes-old-transform-config"; + String config = "{\"dest\": {\"index\":\"bar\"}," + + " \"source\": {\"index\":\"" + transformIndex + "\", \"query\": {\"match_all\":{}}}," + + " \"id\": \""+transformId+"\"," + + " \"doc_type\": \"data_frame_transform_config\"," + + " \"pivot\": {" + + " \"group_by\": {" + + " \"reviewer\": {" + + " \"terms\": {" + + " \"field\": \"user_id\"" + + " } } }," + + " \"aggregations\": {" + + " \"avg_rating\": {" + + " \"avg\": {" + + " \"field\": \"stars\"" + + " } } } }," + + "\"frequency\":\"1s\"" + + "}"; + client.index(new IndexRequest(OLD_INDEX) + .id(DataFrameTransformConfig.documentId(transformId)) + .source(config, XContentType.JSON) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT); + GetResponse getResponse = client.get(new GetRequest(OLD_INDEX, DataFrameTransformConfig.documentId(transformId)), + RequestOptions.DEFAULT); + assertThat(getResponse.isExists(), is(true)); + + GetDataFrameTransformResponse response = client.dataFrame() + .getDataFrameTransform(new GetDataFrameTransformRequest(transformId), RequestOptions.DEFAULT); + assertThat(response.getTransformConfigurations().get(0).getId(), equalTo(transformId)); + + UpdateDataFrameTransformResponse updated = client.dataFrame().updateDataFrameTransform( + new UpdateDataFrameTransformRequest(DataFrameTransformConfigUpdate.builder().setDescription("updated").build(), transformId), + RequestOptions.DEFAULT); + + assertThat(updated.getTransformConfiguration().getId(), equalTo(transformId)); + assertThat(updated.getTransformConfiguration().getDescription(), equalTo("updated")); + + // Old should now be gone + getResponse = client.get(new GetRequest(OLD_INDEX, DataFrameTransformConfig.documentId(transformId)), RequestOptions.DEFAULT); + assertThat(getResponse.isExists(), is(false)); + + // New should be here + getResponse = client.get(new GetRequest(CURRENT_INDEX, DataFrameTransformConfig.documentId(transformId)), + RequestOptions.DEFAULT); + assertThat(getResponse.isExists(), is(true)); + } + + + @Override + protected Settings restClientSettings() { + final String token = "Basic " + + Base64.getEncoder().encodeToString(("x_pack_rest_user:x-pack-test-password").getBytes(StandardCharsets.UTF_8)); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + private void createSourceIndex(String index) throws IOException { + TestRestHighLevelClient client = new TestRestHighLevelClient(); + client.indices().create(new CreateIndexRequest(index), RequestOptions.DEFAULT); + } + + private class TestRestHighLevelClient extends RestHighLevelClient { + TestRestHighLevelClient() { + super(client(), restClient -> {}, new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents()); + } + } +} diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java 
b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java index 5fa81d52ca1..e936606d127 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameUsageIT.java @@ -54,7 +54,7 @@ public class DataFrameUsageIT extends DataFrameRestTestCase { stopDataFrameTransform("test_usage", false); Request statsExistsRequest = new Request("GET", - DataFrameInternalIndex.INDEX_NAME+"/_search?q=" + + DataFrameInternalIndex.LATEST_INDEX_NAME+"/_search?q=" + INDEX_DOC_TYPE.getPreferredName() + ":" + DataFrameTransformStoredDoc.NAME); // Verify that we have one stat document @@ -96,7 +96,7 @@ public class DataFrameUsageIT extends DataFrameRestTestCase { XContentMapValues.extractValue("data_frame.stats." + statName, statsMap)); } // Refresh the index so that statistics are searchable - refreshIndex(DataFrameInternalIndex.INDEX_TEMPLATE_NAME); + refreshIndex(DataFrameInternalIndex.LATEST_INDEX_VERSIONED_NAME); }, 60, TimeUnit.SECONDS); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index df78c54b535..85e03744a05 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -198,7 +198,7 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu public UnaryOperator> getIndexTemplateMetaDataUpgrader() { return templates -> { try { - templates.put(DataFrameInternalIndex.INDEX_TEMPLATE_NAME, DataFrameInternalIndex.getIndexTemplateMetaData()); + templates.put(DataFrameInternalIndex.LATEST_INDEX_VERSIONED_NAME, DataFrameInternalIndex.getIndexTemplateMetaData()); } catch (IOException e) { logger.error("Error creating data frame index template", e); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java index 9071b744ebe..1e595b71502 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrameFeatureSet.java @@ -154,7 +154,7 @@ public class DataFrameFeatureSet implements XPackFeatureSet { } ); - SearchRequest totalTransformCount = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) + SearchRequest totalTransformCount = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) .setTrackTotalHits(true) .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformConfig.NAME)))) @@ -196,7 +196,7 @@ public class DataFrameFeatureSet implements XPackFeatureSet { .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformStoredDoc.NAME))); - SearchRequestBuilder requestBuilder = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) + SearchRequestBuilder requestBuilder = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) .setSize(0) .setQuery(queryBuilder); diff --git 
a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java index 7d117b2bfb4..8fc03681b24 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsAction.java @@ -16,6 +16,8 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; @@ -56,7 +58,7 @@ public class TransportGetDataFrameTransformsAction extends AbstractTransportGetR @Override protected String[] getIndices() { - return new String[]{DataFrameInternalIndex.INDEX_NAME}; + return new String[]{DataFrameInternalIndex.INDEX_NAME_PATTERN}; } @Override @@ -84,4 +86,10 @@ public class TransportGetDataFrameTransformsAction extends AbstractTransportGetR protected QueryBuilder additionalQuery() { return QueryBuilders.termQuery(INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformConfig.NAME); } + + @Override + protected SearchSourceBuilder customSearchOptions(SearchSourceBuilder searchSourceBuilder) { + return searchSourceBuilder.sort("_index", SortOrder.DESC); + } + } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 1b51a4ba4c3..872c9343a86 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -175,7 +175,7 @@ public class TransportStopDataFrameTransformAction extends TransportTasksAction< waitResponse -> client.admin() .indices() - .prepareRefresh(DataFrameInternalIndex.INDEX_NAME) + .prepareRefresh(DataFrameInternalIndex.LATEST_INDEX_NAME) .execute(ActionListener.wrap( r -> listener.onResponse(waitResponse), e -> { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportUpdateDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportUpdateDataFrameTransformAction.java index 84446e3755c..c521ecc677a 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportUpdateDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportUpdateDataFrameTransformAction.java @@ -6,6 +6,8 @@ package org.elasticsearch.xpack.dataframe.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -19,6 +21,7 @@ import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; @@ -58,6 +61,7 @@ import static org.elasticsearch.xpack.dataframe.action.TransportPutDataFrameTran public class TransportUpdateDataFrameTransformAction extends TransportMasterNodeAction { + private static final Logger logger = LogManager.getLogger(TransportUpdateDataFrameTransformAction.class); private final XPackLicenseState licenseState; private final Client client; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; @@ -108,8 +112,6 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode DataFrameTransformConfigUpdate update = request.getUpdate(); update.setHeaders(filteredHeaders); - String transformId = request.getId(); - // GET transform and attempt to update // We don't want the update to complete if the config changed between GET and INDEX dataFrameTransformsConfigManager.getTransformConfigurationForUpdate(request.getId(), ActionListener.wrap( @@ -135,12 +137,12 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode private void handlePrivsResponse(String username, Request request, DataFrameTransformConfig config, - DataFrameTransformsConfigManager.SeqNoPrimaryTermPair seqNoPrimaryTermPair, + DataFrameTransformsConfigManager.SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ClusterState clusterState, HasPrivilegesResponse privilegesResponse, ActionListener listener) { if (privilegesResponse.isCompleteMatch()) { - updateDataFrame(request, config, seqNoPrimaryTermPair, clusterState, listener); + updateDataFrame(request, config, seqNoPrimaryTermAndIndex, clusterState, listener); } else { List indices = privilegesResponse.getIndexPrivileges() .stream() @@ -158,7 +160,7 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode private void validateAndUpdateDataFrame(Request request, ClusterState clusterState, DataFrameTransformConfig config, - DataFrameTransformsConfigManager.SeqNoPrimaryTermPair seqNoPrimaryTermPair, + DataFrameTransformsConfigManager.SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ActionListener listener) { try { SourceDestValidator.validate(config, clusterState, indexNameExpressionResolver, request.isDeferValidation()); @@ -173,17 +175,17 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode final String username = securityContext.getUser().principal(); HasPrivilegesRequest privRequest = buildPrivilegeCheck(config, indexNameExpressionResolver, clusterState, username); ActionListener privResponseListener = ActionListener.wrap( - r -> handlePrivsResponse(username, request, config, seqNoPrimaryTermPair, clusterState, r, listener), + r -> handlePrivsResponse(username, request, config, seqNoPrimaryTermAndIndex, clusterState, r, listener), listener::onFailure); client.execute(HasPrivilegesAction.INSTANCE, privRequest, privResponseListener); } else { // No security enabled, just create the transform - updateDataFrame(request, config, seqNoPrimaryTermPair, clusterState, listener); + updateDataFrame(request, config, seqNoPrimaryTermAndIndex, clusterState, listener); } } private void updateDataFrame(Request 
request, DataFrameTransformConfig config, - DataFrameTransformsConfigManager.SeqNoPrimaryTermPair seqNoPrimaryTermPair, + DataFrameTransformsConfigManager.SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ClusterState clusterState, ActionListener listener) { @@ -193,7 +195,18 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode ActionListener putTransformConfigurationListener = ActionListener.wrap( putTransformConfigurationResult -> { auditor.info(config.getId(), "updated data frame transform."); - listener.onResponse(new Response(config)); + dataFrameTransformsConfigManager.deleteOldTransformConfigurations(request.getId(), ActionListener.wrap( + r -> { + logger.trace("[{}] successfully deleted old transform configurations", request.getId()); + listener.onResponse(new Response(config)); + }, + e -> { + logger.warn( + LoggerMessageFormat.format("[{}] failed deleting old transform configurations.", request.getId()), + e); + listener.onResponse(new Response(config)); + } + )); }, // If we failed to INDEX AND we created the destination index, the destination index will still be around // This is a similar behavior to _start @@ -203,7 +216,7 @@ public class TransportUpdateDataFrameTransformAction extends TransportMasterNode // <2> Update our transform ActionListener createDestinationListener = ActionListener.wrap( createDestResponse -> dataFrameTransformsConfigManager.updateTransformConfiguration(config, - seqNoPrimaryTermPair, + seqNoPrimaryTermAndIndex, putTransformConfigurationListener), listener::onFailure ); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java index 0307f4458a3..f1f5dd85b93 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameInternalIndex.java @@ -31,11 +31,23 @@ import static org.elasticsearch.xpack.core.dataframe.DataFrameField.TRANSFORM_ID public final class DataFrameInternalIndex { + /* Changelog of internal index versions + * + * Please list changes, increase the version if you are 1st in this release cycle + * + * version 1 (7.2): initial + * version 2 (7.4): cleanup, add config::version, config::create_time, checkpoint::timestamp, checkpoint::time_upper_bound, + * progress::docs_processed, progress::docs_indexed, + * stats::exponential_avg_checkpoint_duration_ms, stats::exponential_avg_documents_indexed, + * stats::exponential_avg_documents_processed + */ + // constants for the index - public static final String INDEX_TEMPLATE_VERSION = "1"; - public static final String INDEX_TEMPLATE_PATTERN = ".data-frame-internal-"; - public static final String INDEX_TEMPLATE_NAME = INDEX_TEMPLATE_PATTERN + INDEX_TEMPLATE_VERSION; - public static final String INDEX_NAME = INDEX_TEMPLATE_NAME; + public static final String INDEX_VERSION = "2"; + public static final String INDEX_PATTERN = ".data-frame-internal-"; + public static final String LATEST_INDEX_VERSIONED_NAME = INDEX_PATTERN + INDEX_VERSION; + public static final String LATEST_INDEX_NAME = LATEST_INDEX_VERSIONED_NAME; + public static final String INDEX_NAME_PATTERN = INDEX_PATTERN + "*"; public static final String AUDIT_TEMPLATE_VERSION = "1"; public static final String AUDIT_INDEX_PREFIX = ".data-frame-notifications-"; @@ -58,8 +70,8 
@@ public final class DataFrameInternalIndex { public static final String KEYWORD = "keyword"; public static IndexTemplateMetaData getIndexTemplateMetaData() throws IOException { - IndexTemplateMetaData dataFrameTemplate = IndexTemplateMetaData.builder(INDEX_TEMPLATE_NAME) - .patterns(Collections.singletonList(INDEX_TEMPLATE_NAME)) + IndexTemplateMetaData dataFrameTemplate = IndexTemplateMetaData.builder(LATEST_INDEX_VERSIONED_NAME) + .patterns(Collections.singletonList(LATEST_INDEX_VERSIONED_NAME)) .version(Version.CURRENT.id) .settings(Settings.builder() // the configurations are expected to be small @@ -117,7 +129,7 @@ public final class DataFrameInternalIndex { return builder; } - private static XContentBuilder mappings() throws IOException { + public static XContentBuilder mappings() throws IOException { XContentBuilder builder = jsonBuilder(); builder.startObject(); @@ -134,6 +146,8 @@ public final class DataFrameInternalIndex { addDataFrameTransformsConfigMappings(builder); // add the schema for transform stats addDataFrameTransformStoredDocMappings(builder); + // add the schema for checkpoints + addDataFrameCheckpointMappings(builder); // end type builder.endObject(); // end properties @@ -226,15 +240,13 @@ public final class DataFrameInternalIndex { .field(TYPE, DOUBLE) .endObject() .endObject() - .endObject() + .endObject(); // This is obsolete and can be removed for future versions of the index, but is left here as a warning/reminder that // we cannot declare this field differently in version 1 of the internal index as it would cause a mapping clash - .startObject("checkpointing") - .field(ENABLED, false) - .endObject(); + // .startObject("checkpointing").field(ENABLED, false).endObject(); } - private static XContentBuilder addDataFrameTransformsConfigMappings(XContentBuilder builder) throws IOException { + public static XContentBuilder addDataFrameTransformsConfigMappings(XContentBuilder builder) throws IOException { return builder .startObject(DataFrameField.ID.getPreferredName()) .field(TYPE, KEYWORD) @@ -258,6 +270,22 @@ public final class DataFrameInternalIndex { .endObject() .startObject(DataFrameField.DESCRIPTION.getPreferredName()) .field(TYPE, TEXT) + .endObject() + .startObject(DataFrameField.VERSION.getPreferredName()) + .field(TYPE, KEYWORD) + .endObject() + .startObject(DataFrameField.CREATE_TIME.getPreferredName()) + .field(TYPE, DATE) + .endObject(); + } + + private static XContentBuilder addDataFrameCheckpointMappings(XContentBuilder builder) throws IOException { + return builder + .startObject(DataFrameField.TIMESTAMP_MILLIS.getPreferredName()) + .field(TYPE, DATE) + .endObject() + .startObject(DataFrameField.TIME_UPPER_BOUND_MILLIS.getPreferredName()) + .field(TYPE, DATE) .endObject(); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java index 2c8281eeab2..3d5c8b28aaa 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManager.java @@ -9,16 +9,18 @@ package org.elasticsearch.xpack.dataframe.persistence; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchParseException; +import 
org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; -import org.elasticsearch.action.get.GetAction; -import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; @@ -37,8 +39,11 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.DeleteByQueryRequest; +import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.xpack.core.action.util.ExpandedIdsMatcher; @@ -54,12 +59,34 @@ import java.io.InputStream; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Set; import static org.elasticsearch.xpack.core.ClientHelper.DATA_FRAME_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; +/** + * Central place for all interactions with the internal transforms index. For configuration and mappings see {@link DataFrameInternalIndex} + * + * Versioned Index: + * + * We wrap several indices under one pattern: ".data-frame-internal-1", ".data-frame-internal-2", ..., ".data-frame-internal-n", where + * n is the _current_ version of the index. + * + * - all gets/reads and deletes-by-query are searches across all indices; last one wins, so the result from the highest version is used + * - all puts and updates go into the _current_ version of the index; in case of updates this can leave duplicates behind + * + * Duplicate handling / old version cleanup + * + * As we always write to the new index, updates of older documents leave a duplicate behind in the previous versioned index. However, + * documents are tiny, so the impact is rather small. + * + * Nevertheless, cleanup would be good; eventually we need to move old documents into new indices after major upgrades.
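+ * + * As a sketch, a "last one wins" read is a search over the whole pattern, sorted so the highest versioned index comes first (docId here stands for whichever document id is being looked up): + * + * SearchRequest request = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + * .setQuery(QueryBuilders.termQuery("_id", docId)) + * .addSort("_index", SortOrder.DESC) + * .setSize(1) + * .request();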
+ * + * TODO: Provide a method that moves old docs into the current index and deletes old indices and templates + */ public class DataFrameTransformsConfigManager { private static final Logger logger = LogManager.getLogger(DataFrameTransformsConfigManager.class); @@ -84,7 +111,7 @@ public class DataFrameTransformsConfigManager { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = checkpoint.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.INDEX_NAME) + IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.LATEST_INDEX_NAME) .opType(DocWriteRequest.OpType.INDEX) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .id(DataFrameTransformCheckpoint.documentId(checkpoint.getTransformId(), checkpoint.getCheckpoint())) @@ -116,30 +143,91 @@ public class DataFrameTransformsConfigManager { * but is an index operation that will fail with a version conflict * if the current document seqNo and primaryTerm are not the same as the provided version. * @param transformConfig the {@link DataFrameTransformConfig} - * @param seqNoPrimaryTermPair an object containing the believed seqNo and primaryTerm for the doc. + * @param seqNoPrimaryTermAndIndex an object containing the believed seqNo, primaryTerm and index for the doc. * Used for optimistic concurrency control * @param listener listener to call after request */ public void updateTransformConfiguration(DataFrameTransformConfig transformConfig, - SeqNoPrimaryTermPair seqNoPrimaryTermPair, + SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ActionListener listener) { - putTransformConfiguration(transformConfig, DocWriteRequest.OpType.INDEX, seqNoPrimaryTermPair, listener); + if (seqNoPrimaryTermAndIndex.getIndex().equals(DataFrameInternalIndex.LATEST_INDEX_NAME)) { + // update the config in the same, current index using optimistic concurrency control + putTransformConfiguration(transformConfig, DocWriteRequest.OpType.INDEX, seqNoPrimaryTermAndIndex, listener); + } else { + // create the config in the current version of the index, assuming there is no existing one + // this leaves a duplicate behind in the old index; see the note on duplicate handling at the top + putTransformConfiguration(transformConfig, DocWriteRequest.OpType.CREATE, null, listener); + } + } + + /** + * This deletes configuration documents that match the given transformId and are stored in older index versions.
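+ * It is implemented as a delete-by-query over the whole index pattern that explicitly excludes the latest index, so only stale copies are removed.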
+ * + * @param transformId The configuration ID potentially referencing configurations stored in the old indices + * @param listener listener to alert on completion + */ + public void deleteOldTransformConfigurations(String transformId, ActionListener listener) { + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN) + .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() + .mustNot(QueryBuilders.termQuery("_index", DataFrameInternalIndex.LATEST_INDEX_NAME)) + .filter(QueryBuilders.termQuery("_id", DataFrameTransformConfig.documentId(transformId))))) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()); + + executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryRequest, ActionListener.wrap( + response -> { + if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { + Tuple statusAndReason = getStatusAndReason(response); + listener.onFailure( + new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2())); + return; + } + listener.onResponse(true); + }, + listener::onFailure + )); + } + + /** + * This deletes stored state/stats documents for the given transformId that are contained in old index versions. + * + * @param transformId The transform ID referenced by the documents + * @param listener listener to alert on completion + */ + public void deleteOldTransformStoredDocuments(String transformId, ActionListener listener) { + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN) + .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() + .mustNot(QueryBuilders.termQuery("_index", DataFrameInternalIndex.LATEST_INDEX_NAME)) + .filter(QueryBuilders.termQuery("_id", DataFrameTransformStoredDoc.documentId(transformId))))) + .setIndicesOptions(IndicesOptions.lenientExpandOpen()); + + executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryRequest, ActionListener.wrap( + response -> { + if ((response.getBulkFailures().isEmpty() && response.getSearchFailures().isEmpty()) == false) { + Tuple statusAndReason = getStatusAndReason(response); + listener.onFailure( + new ElasticsearchStatusException(statusAndReason.v2().getMessage(), statusAndReason.v1(), statusAndReason.v2())); + return; + } + listener.onResponse(true); + }, + listener::onFailure + )); } private void putTransformConfiguration(DataFrameTransformConfig transformConfig, DocWriteRequest.OpType optType, - SeqNoPrimaryTermPair seqNoPrimaryTermPair, + SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex, ActionListener listener) { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = transformConfig.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.INDEX_NAME) + IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.LATEST_INDEX_NAME) .opType(optType) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .id(DataFrameTransformConfig.documentId(transformConfig.getId())) .source(source); - if (seqNoPrimaryTermPair != null) { - indexRequest.setIfSeqNo(seqNoPrimaryTermPair.seqNo).setIfPrimaryTerm(seqNoPrimaryTermPair.primaryTerm); + if (seqNoPrimaryTermAndIndex != null) { + indexRequest.setIfSeqNo(seqNoPrimaryTermAndIndex.seqNo).setIfPrimaryTerm(seqNoPrimaryTermAndIndex.primaryTerm); } 
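+ // when ifSeqNo/ifPrimaryTerm were set above, this write uses optimistic concurrency control: if the document changed since it was read, the request fails with a version conflict instead of silently overwriting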
executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap(r -> { listener.onResponse(true); @@ -170,19 +258,25 @@ public class DataFrameTransformsConfigManager { * @param resultListener listener to call after request has been made */ public void getTransformCheckpoint(String transformId, long checkpoint, ActionListener resultListener) { - GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, - DataFrameTransformCheckpoint.documentId(transformId, checkpoint)); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, GetAction.INSTANCE, getRequest, ActionListener.wrap(getResponse -> { + QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", DataFrameTransformCheckpoint.documentId(transformId, checkpoint)); + SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + .setQuery(queryBuilder) + // use sort to get the last + .addSort("_index", SortOrder.DESC) + .setSize(1) + .request(); - if (getResponse.isExists() == false) { - // do not fail if checkpoint does not exist but return an empty checkpoint - logger.trace("found no checkpoint for transform [" + transformId + "], returning empty checkpoint"); - resultListener.onResponse(DataFrameTransformCheckpoint.EMPTY); - return; - } - BytesReference source = getResponse.getSourceAsBytesRef(); - parseCheckpointsLenientlyFromSource(source, transformId, resultListener); - }, resultListener::onFailure)); + executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( + searchResponse -> { + if (searchResponse.getHits().getHits().length == 0) { + // do not fail if checkpoint does not exist but return an empty checkpoint + logger.trace("found no checkpoint for transform [" + transformId + "], returning empty checkpoint"); + resultListener.onResponse(DataFrameTransformCheckpoint.EMPTY); + return; + } + BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); + parseCheckpointsLenientlyFromSource(source, transformId, resultListener); + }, resultListener::onFailure)); } /** @@ -193,24 +287,25 @@ public class DataFrameTransformsConfigManager { * @param resultListener listener to call after inner request has returned */ public void getTransformConfiguration(String transformId, ActionListener resultListener) { - GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameTransformConfig.documentId(transformId)); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, GetAction.INSTANCE, getRequest, ActionListener.wrap(getResponse -> { + QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", DataFrameTransformConfig.documentId(transformId)); + SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + .setQuery(queryBuilder) + // use sort to get the last + .addSort("_index", SortOrder.DESC) + .setSize(1) + .request(); - if (getResponse.isExists() == false) { - resultListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); - return; - } - BytesReference source = getResponse.getSourceAsBytesRef(); - parseTransformLenientlyFromSource(source, transformId, resultListener); - }, e -> { - if (e.getClass() == IndexNotFoundException.class) { - resultListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); - } else { - resultListener.onFailure(e); - } - })); + 
executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, SearchAction.INSTANCE, searchRequest, + ActionListener.wrap( + searchResponse -> { + if (searchResponse.getHits().getHits().length == 0) { + resultListener.onFailure(new ResourceNotFoundException( + DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + return; + } + BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); + parseTransformLenientlyFromSource(source, transformId, resultListener); + }, resultListener::onFailure)); } /** @@ -222,28 +317,30 @@ public class DataFrameTransformsConfigManager { */ public void getTransformConfigurationForUpdate(String transformId, ActionListener> configAndVersionListener) { - GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameTransformConfig.documentId(transformId)); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, GetAction.INSTANCE, getRequest, ActionListener.wrap(getResponse -> { + SeqNoPrimaryTermAndIndex>> configAndVersionListener) { + QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", DataFrameTransformConfig.documentId(transformId)); + SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + .setQuery(queryBuilder) + // use sort to get the last + .addSort("_index", SortOrder.DESC) + .setSize(1) + .seqNoAndPrimaryTerm(true) + .request(); - if (getResponse.isExists() == false) { - configAndVersionListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); - return; - } - BytesReference source = getResponse.getSourceAsBytesRef(); - parseTransformLenientlyFromSource(source, transformId, ActionListener.wrap( - config -> configAndVersionListener.onResponse(Tuple.tuple(config, - new SeqNoPrimaryTermPair(getResponse.getSeqNo(), getResponse.getPrimaryTerm()))), - configAndVersionListener::onFailure)); - }, e -> { - if (e.getClass() == IndexNotFoundException.class) { - configAndVersionListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); - } else { - configAndVersionListener.onFailure(e); - } - })); + executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( + searchResponse -> { + if (searchResponse.getHits().getHits().length == 0) { + configAndVersionListener.onFailure(new ResourceNotFoundException( + DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); + return; + } + SearchHit hit = searchResponse.getHits().getHits()[0]; + BytesReference source = hit.getSourceRef(); + parseTransformLenientlyFromSource(source, transformId, ActionListener.wrap( + config -> configAndVersionListener.onResponse(Tuple.tuple(config, + new SeqNoPrimaryTermAndIndex(hit.getSeqNo(), hit.getPrimaryTerm(), hit.getIndex()))), + configAndVersionListener::onFailure)); + }, configAndVersionListener::onFailure)); } /** @@ -263,7 +360,7 @@ public class DataFrameTransformsConfigManager { String[] idTokens = ExpandedIdsMatcher.tokenizeExpression(transformIdsExpression); QueryBuilder queryBuilder = buildQueryFromTokenizedIds(idTokens, DataFrameTransformConfig.NAME); - SearchRequest request = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) + SearchRequest request = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) 
.setFrom(pageParams.getFrom()) .setTrackTotalHits(true) @@ -275,35 +372,33 @@ public class DataFrameTransformsConfigManager { final ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(idTokens, allowNoMatch); - executeAsyncWithOrigin(client.threadPool().getThreadContext(), DATA_FRAME_ORIGIN, request, - ActionListener.wrap( - searchResponse -> { - long totalHits = searchResponse.getHits().getTotalHits().value; - List ids = new ArrayList<>(searchResponse.getHits().getHits().length); - for (SearchHit hit : searchResponse.getHits().getHits()) { - BytesReference source = hit.getSourceRef(); - try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, stream)) { - ids.add((String) parser.map().get(DataFrameField.ID.getPreferredName())); - } catch (IOException e) { - foundIdsListener.onFailure(new ElasticsearchParseException("failed to parse search hit for ids", e)); - return; - } - } - requiredMatches.filterMatchedIds(ids); - if (requiredMatches.hasUnmatchedIds()) { - // some required Ids were not found - foundIdsListener.onFailure( - new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, - requiredMatches.unmatchedIdsString()))); + executeAsyncWithOrigin(client.threadPool().getThreadContext(), DATA_FRAME_ORIGIN, request, ActionListener.wrap( + searchResponse -> { + long totalHits = searchResponse.getHits().getTotalHits().value; + // important: preserve order + Set ids = new LinkedHashSet<>(searchResponse.getHits().getHits().length); + for (SearchHit hit : searchResponse.getHits().getHits()) { + BytesReference source = hit.getSourceRef(); + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, stream)) { + ids.add((String) parser.map().get(DataFrameField.ID.getPreferredName())); + } catch (IOException e) { + foundIdsListener.onFailure(new ElasticsearchParseException("failed to parse search hit for ids", e)); return; } - foundIdsListener.onResponse(new Tuple<>(totalHits, ids)); - }, - foundIdsListener::onFailure - ), client::search); + } + requiredMatches.filterMatchedIds(ids); + if (requiredMatches.hasUnmatchedIds()) { + // some required Ids were not found + foundIdsListener.onFailure( + new ResourceNotFoundException( + DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, + requiredMatches.unmatchedIdsString()))); + return; + } + foundIdsListener.onResponse(new Tuple<>(totalHits, new ArrayList<>(ids))); + }, foundIdsListener::onFailure), client::search); } /** @@ -314,15 +409,14 @@ public class DataFrameTransformsConfigManager { */ public void deleteTransform(String transformId, ActionListener listener) { DeleteByQueryRequest request = new DeleteByQueryRequest() - .setAbortOnVersionConflict(false); //since these documents are not updated, a conflict just means it was deleted previously + .setAbortOnVersionConflict(false); //since these documents are not updated, a conflict just means it was deleted previously - request.indices(DataFrameInternalIndex.INDEX_NAME); + request.indices(DataFrameInternalIndex.INDEX_NAME_PATTERN); QueryBuilder query = QueryBuilders.termQuery(DataFrameField.ID.getPreferredName(), transformId); request.setQuery(query); request.setRefresh(true); executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, 
DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(deleteResponse -> { - if (deleteResponse.getDeleted() == 0) { listener.onFailure(new ResourceNotFoundException( DataFrameMessages.getMessage(DataFrameMessages.REST_DATA_FRAME_UNKNOWN_TRANSFORM, transformId))); @@ -343,9 +437,10 @@ public class DataFrameTransformsConfigManager { try (XContentBuilder builder = XContentFactory.jsonBuilder()) { XContentBuilder source = stats.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); - IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.INDEX_NAME) + IndexRequest indexRequest = new IndexRequest(DataFrameInternalIndex.LATEST_INDEX_NAME) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .id(DataFrameTransformStoredDoc.documentId(stats.getId())) + .opType(DocWriteRequest.OpType.INDEX) .source(source); executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, IndexAction.INSTANCE, indexRequest, ActionListener.wrap( @@ -363,51 +458,56 @@ public class DataFrameTransformsConfigManager { } public void getTransformStoredDoc(String transformId, ActionListener resultListener) { - GetRequest getRequest = new GetRequest(DataFrameInternalIndex.INDEX_NAME, DataFrameTransformStoredDoc.documentId(transformId)); - executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, GetAction.INSTANCE, getRequest, ActionListener.wrap(getResponse -> { + QueryBuilder queryBuilder = QueryBuilders.termQuery("_id", DataFrameTransformStoredDoc.documentId(transformId)); + SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + .setQuery(queryBuilder) + // use sort to get the last + .addSort("_index", SortOrder.DESC) + .setSize(1) + .request(); - if (getResponse.isExists() == false) { - resultListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UNKNOWN_TRANSFORM_STATS, transformId))); - return; - } - BytesReference source = getResponse.getSourceAsBytesRef(); - try (InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { - resultListener.onResponse(DataFrameTransformStoredDoc.fromXContent(parser)); - } catch (Exception e) { - logger.error( - DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION, transformId), e); - resultListener.onFailure(e); - } - }, e -> { - if (e instanceof ResourceNotFoundException) { - resultListener.onFailure(new ResourceNotFoundException( - DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UNKNOWN_TRANSFORM_STATS, transformId))); - } else { - resultListener.onFailure(e); - } - })); + executeAsyncWithOrigin(client, DATA_FRAME_ORIGIN, SearchAction.INSTANCE, searchRequest, ActionListener.wrap( + searchResponse -> { + if (searchResponse.getHits().getHits().length == 0) { + resultListener.onFailure(new ResourceNotFoundException( + DataFrameMessages.getMessage(DataFrameMessages.DATA_FRAME_UNKNOWN_TRANSFORM_STATS, transformId))); + return; + } + BytesReference source = searchResponse.getHits().getHits()[0].getSourceRef(); + try (InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream)) { + resultListener.onResponse(DataFrameTransformStoredDoc.fromXContent(parser)); + } catch (Exception e) { + 
logger.error(DataFrameMessages.getMessage(DataFrameMessages.FAILED_TO_PARSE_TRANSFORM_STATISTICS_CONFIGURATION, + transformId), e); + resultListener.onFailure(e); + } + }, resultListener::onFailure)); } public void getTransformStoredDoc(Collection transformIds, ActionListener> listener) { - QueryBuilder builder = QueryBuilders.constantScoreQuery(QueryBuilders.boolQuery() - .filter(QueryBuilders.termsQuery(DataFrameField.ID.getPreferredName(), transformIds)) - .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformStoredDoc.NAME))); + .filter(QueryBuilders.termsQuery(DataFrameField.ID.getPreferredName(), transformIds)) + .filter(QueryBuilders.termQuery(DataFrameField.INDEX_DOC_TYPE.getPreferredName(), DataFrameTransformStoredDoc.NAME))); - SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME) - .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) - .setQuery(builder) - .setSize(Math.min(transformIds.size(), 10_000)) - .request(); + SearchRequest searchRequest = client.prepareSearch(DataFrameInternalIndex.INDEX_NAME_PATTERN) + .addSort(DataFrameField.ID.getPreferredName(), SortOrder.ASC) + .addSort("_index", SortOrder.DESC) + .setQuery(builder) + // the limit for getting stats and transforms is 1000, as long as we do not have 10 indices this works + .setSize(Math.min(transformIds.size(), 10_000)) + .request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), DATA_FRAME_ORIGIN, searchRequest, - ActionListener.wrap( - searchResponse -> { - List stats = new ArrayList<>(); - for (SearchHit hit : searchResponse.getHits().getHits()) { + ActionListener.wrap( + searchResponse -> { + List stats = new ArrayList<>(); + String previousId = null; + for (SearchHit hit : searchResponse.getHits().getHits()) { + // skip old versions + if (hit.getId().equals(previousId) == false) { + previousId = hit.getId(); BytesReference source = hit.getSourceRef(); try (InputStream stream = source.streamInput(); XContentParser parser = XContentFactory.xContent(XContentType.JSON) @@ -419,17 +519,11 @@ public class DataFrameTransformsConfigManager { return; } } - - listener.onResponse(stats); - }, - e -> { - if (e.getClass() == IndexNotFoundException.class) { - listener.onResponse(Collections.emptyList()); - } else { - listener.onFailure(e); - } } - ), client::search); + + listener.onResponse(stats); + }, listener::onFailure + ), client::search); } private void parseTransformLenientlyFromSource(BytesReference source, String transformId, @@ -480,13 +574,37 @@ public class DataFrameTransformsConfigManager { return QueryBuilders.constantScoreQuery(queryBuilder); } - public static class SeqNoPrimaryTermPair { + private static Tuple getStatusAndReason(final BulkByScrollResponse response) { + RestStatus status = RestStatus.OK; + Throwable reason = new Exception("Unknown error"); + //Getting the max RestStatus is sort of arbitrary, would the user care about 5xx over 4xx? + //Unsure of a better way to return an appropriate and possibly actionable cause to the user. 
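The two loops that follow implement that choice by keeping whichever failure carries the highest HTTP status code. A minimal standalone sketch of the same "worst failure wins" reduction, where `Failure` is a hypothetical stand-in for `BulkItemResponse.Failure` and `ScrollableHitSource.SearchFailure`, not the real ES types:

```java
import java.util.Arrays;
import java.util.List;

// Sketch only: reduce a list of failures to the single "worst" one,
// mirroring getStatusAndReason in the hunk below.
final class WorstFailurePicker {
    static final class Failure {
        final int httpStatus;      // e.g. 404, 409, 500
        final Throwable cause;
        Failure(int httpStatus, Throwable cause) {
            this.httpStatus = httpStatus;
            this.cause = cause;
        }
    }

    static Failure pickWorst(List<Failure> failures) {
        // Defaults match the hunk: status OK (200) and a generic cause.
        Failure worst = new Failure(200, new Exception("Unknown error"));
        for (Failure f : failures) {
            if (f.httpStatus > worst.httpStatus) {
                worst = f;   // higher status code wins, so 5xx beats 4xx beats OK
            }
        }
        return worst;
    }

    public static void main(String[] args) {
        Failure worst = pickWorst(Arrays.asList(
            new Failure(409, new Exception("version conflict")),
            new Failure(500, new Exception("node dropped"))));
        System.out.println(worst.httpStatus);   // prints 500
    }
}
```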
+ for (BulkItemResponse.Failure failure : response.getBulkFailures()) { + if (failure.getStatus().getStatus() > status.getStatus()) { + status = failure.getStatus(); + reason = failure.getCause(); + } + } + + for (ScrollableHitSource.SearchFailure failure : response.getSearchFailures()) { + RestStatus failureStatus = org.elasticsearch.ExceptionsHelper.status(failure.getReason()); + if (failureStatus.getStatus() > status.getStatus()) { + status = failureStatus; + reason = failure.getReason(); + } + } + return new Tuple<>(status, reason); + } + + public static class SeqNoPrimaryTermAndIndex { private final long seqNo; private final long primaryTerm; + private final String index; - public SeqNoPrimaryTermPair(long seqNo, long primaryTerm) { + public SeqNoPrimaryTermAndIndex(long seqNo, long primaryTerm, String index) { this.seqNo = seqNo; this.primaryTerm = primaryTerm; + this.index = index; } public long getSeqNo() { @@ -496,5 +614,9 @@ public class DataFrameTransformsConfigManager { public long getPrimaryTerm() { return primaryTerm; } + + public String getIndex() { + return index; + } } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java index b3a6a80a0b1..347e0b91428 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/persistence/DataframeIndex.java @@ -100,7 +100,7 @@ public final class DataframeIndex { .field(DataFrameField.CREATED_BY, DataFrameField.DATA_FRAME_SIGNATURE) .startObject(DataFrameField.META_FIELDNAME) .field(DataFrameField.CREATION_DATE_MILLIS, clock.millis()) - .startObject(DataFrameField.VERSION) + .startObject(DataFrameField.VERSION.getPreferredName()) .field(DataFrameField.CREATED, Version.CURRENT) .endObject() .field(DataFrameField.TRANSFORM, id) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index dc37e937ea1..593c3c6e8a5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -104,7 +104,7 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(); String[] indices = resolver.concreteIndexNames(clusterState, IndicesOptions.lenientExpandOpen(), - DataFrameInternalIndex.INDEX_TEMPLATE_PATTERN + "*"); + DataFrameInternalIndex.INDEX_NAME_PATTERN); List unavailableIndices = new ArrayList<>(indices.length); for (String index : indices) { IndexRoutingTable routingTable = clusterState.getRoutingTable().index(index); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 641e3a0d1d7..973aa07b189 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ 
b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.dataframe.transforms; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; @@ -21,7 +22,7 @@ import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.LoggerMessageFormat; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexNotFoundException; @@ -58,6 +59,7 @@ import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils import java.time.Instant; import java.util.Arrays; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; @@ -246,7 +248,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S * @param listener Started listener */ public synchronized void start(Long startingCheckpoint, boolean force, ActionListener listener) { - logger.debug("[{}] start called with force [{}] and state [{}]", getTransformId(), force, getState()); + logger.debug("[{}] start called with force [{}] and state [{}].", getTransformId(), force, getState()); if (taskState.get() == DataFrameTransformTaskState.FAILED && force == false) { listener.onFailure(new ElasticsearchStatusException( DataFrameMessages.getMessage(DATA_FRAME_CANNOT_START_FAILED_TRANSFORM, @@ -288,7 +290,7 @@ null, getIndexer().getProgress()); - logger.info("Updating state for data frame transform [{}] to [{}]", transform.getId(), state.toString()); + logger.info("[{}] updating state for data frame transform to [{}].", transform.getId(), state.toString()); // Even though the indexer information is persisted to an index, we still need DataFrameTransformTaskState in the clusterstate // This keeps track of STARTED, FAILED, STOPPED // This is because a FAILED state can occur because we cannot read the config from the internal index, which would imply that @@ -304,6 +306,7 @@ listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); }, exc -> { + logger.error(new ParameterizedMessage("[{}] failed updating state to [{}].", getTransformId(), state), exc); getIndexer().stop(); listener.onFailure(new ElasticsearchException("Error while updating state for data frame transform [" + transform.getId() + "] to [" + state.getIndexerState() + "].", exc)); @@ -352,12 +355,12 @@ } if (getIndexer() == null) { - logger.warn("Data frame task [{}] triggered with an unintialized indexer", getTransformId()); + logger.warn("[{}] data frame task triggered with an uninitialized indexer.", getTransformId()); return; } if (taskState.get() == DataFrameTransformTaskState.FAILED) { - logger.debug("Schedule was
triggered for transform [{}] but task is failed. Ignoring trigger.", getTransformId()); + logger.debug("[{}] schedule was triggered for transform but task is failed. Ignoring trigger.", getTransformId()); return; } @@ -366,15 +369,15 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S if (IndexerState.INDEXING.equals(indexerState) || IndexerState.STOPPING.equals(indexerState) || IndexerState.STOPPED.equals(indexerState)) { - logger.debug("Indexer for transform [{}] has state [{}], ignoring trigger", getTransformId(), indexerState); + logger.debug("[{}] indexer for transform has state [{}]. Ignoring trigger.", getTransformId(), indexerState); return; } - logger.debug("Data frame indexer [{}] schedule has triggered, state: [{}]", event.getJobName(), indexerState); + logger.debug("[{}] data frame indexer schedule has triggered, state: [{}].", event.getJobName(), indexerState); // if it runs for the 1st time we just do it, if not we check for changes if (currentCheckpoint.get() == 0) { - logger.debug("Trigger initial run"); + logger.debug("Trigger initial run."); getIndexer().maybeTriggerAsyncJob(System.currentTimeMillis()); } else if (getIndexer().isContinuous()) { getIndexer().maybeTriggerAsyncJob(System.currentTimeMillis()); @@ -405,12 +408,14 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S ActionListener> listener) { updatePersistentTaskState(state, ActionListener.wrap( success -> { - logger.debug("Successfully updated state for data frame transform [{}] to [{}]", transform.getId(), state.toString()); + logger.debug("[{}] successfully updated state for data frame transform to [{}].", transform.getId(), state.toString()); listener.onResponse(success); }, failure -> { auditor.warning(transform.getId(), "Failed to persist to state to cluster state: " + failure.getMessage()); - logger.error("Failed to update state for data frame transform [" + transform.getId() + "]", failure); + logger.error(new ParameterizedMessage("[{}] failed to update cluster state for data frame transform.", + transform.getId()), + failure); listener.onFailure(failure); } )); @@ -420,7 +425,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S // If we are already flagged as failed, this probably means that a second trigger started firing while we were attempting to // flag the previously triggered indexer as failed. Exit early as we are already flagged as failed. if (taskState.get() == DataFrameTransformTaskState.FAILED) { - logger.warn("[{}] is already failed but encountered new failure; reason [{}] ", getTransformId(), reason); + logger.warn("[{}] is already failed but encountered new failure; reason [{}].", getTransformId(), reason); listener.onResponse(null); return; } @@ -428,7 +433,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S // the indexer to fail. Since `ClientDataFrameIndexer#doSaveState` will persist the state to the index once the indexer stops, // it is probably best to NOT change the internal state of the task and allow the normal stopping logic to continue. 
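The checks that follow encode a small decision table for `markAsFailed`: an already-failed task ignores the new failure, and a STOPPING or STOPPED indexer lets the stop proceed instead of flipping to FAILED. A condensed sketch of that ordering as a pure function, using simplified enums rather than the real `DataFrameTransformTaskState`/`IndexerState` types:

```java
// Sketch of the guard ordering in markAsFailed, reduced to a pure function.
// The enums below are simplified stand-ins for the real ES types.
final class FailureGuards {
    enum TaskState { STARTED, FAILED }
    enum IdxState { INDEXING, STOPPING, STOPPED }

    /** @return true only if the task should actually transition to FAILED */
    static boolean shouldMarkFailed(TaskState task, IdxState indexer) {
        if (task == TaskState.FAILED) {
            return false;   // already failed; a second trigger raced us
        }
        if (indexer == IdxState.STOPPING) {
            return false;   // stop in flight; doSaveState persists on stop
        }
        if (indexer == IdxState.STOPPED) {
            return false;   // someone called stop between failure and handling
        }
        return true;        // persist the FAILED task state to cluster state
    }
}
```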
if (getIndexer() != null && getIndexer().getState() == IndexerState.STOPPING) { - logger.info("Attempt to fail transform [" + getTransformId() + "] with reason [" + reason + "] while it was stopping."); + logger.info("[{}] attempt to fail transform with reason [{}] while it was stopping.", getTransformId(), reason); auditor.info(getTransformId(), "Attempted to fail transform with reason [" + reason + "] while in STOPPING state."); listener.onResponse(null); return; @@ -436,7 +441,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S // If we are stopped, this means that between the failure occurring and being handled, somebody called stop // We should just allow that stop to continue if (getIndexer() != null && getIndexer().getState() == IndexerState.STOPPED) { - logger.info("[{}] encountered a failure but indexer is STOPPED; reason [{}]", getTransformId(), reason); + logger.info("[{}] encountered a failure but indexer is STOPPED; reason [{}].", getTransformId(), reason); listener.onResponse(null); return; } @@ -454,7 +459,8 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S persistStateToClusterState(newState, ActionListener.wrap( r -> listener.onResponse(null), e -> { - logger.error("Failed to set task state as failed to cluster state", e); + logger.error(new ParameterizedMessage("[{}] failed to set task state as failed to cluster state.", getTransformId()), + e); listener.onFailure(e); } )); @@ -467,8 +473,9 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S */ @Override public synchronized void onCancelled() { - logger.info( - "Received cancellation request for data frame transform [" + transform.getId() + "], state: [" + taskState.get() + "]"); + logger.info("[{}] received cancellation request for data frame transform, state: [{}].", + getTransformId(), + taskState.get()); if (getIndexer() != null && getIndexer().abort()) { // there is no background transform running, we can shutdown safely shutdown(); @@ -632,6 +639,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S private volatile boolean auditBulkFailures = true; // Keeps track of the last exception that was written to our audit, keeps us from spamming the audit index private volatile String lastAuditedExceptionMessage = null; + private final AtomicBoolean oldStatsCleanedUp = new AtomicBoolean(false); ClientDataFrameIndexer(String transformId, DataFrameTransformsConfigManager transformsConfigManager, @@ -692,13 +700,15 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } TransformProgressGatherer.getInitialProgress(this.client, buildFilterQuery(), getConfig(), ActionListener.wrap( newProgress -> { - logger.trace("[{}] reset the progress from [{}] to [{}]", transformId, progress, newProgress); + logger.trace("[{}] reset the progress from [{}] to [{}].", transformId, progress, newProgress); progress = newProgress; super.onStart(now, listener); }, failure -> { progress = null; - logger.warn("Unable to load progress information for task [" + transformId + "]", failure); + logger.warn(new ParameterizedMessage("[{}] unable to load progress information for task.", + transformId), + failure); super.onStart(now, listener); } )); @@ -775,14 +785,14 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override public synchronized boolean maybeTriggerAsyncJob(long now) { if (transformTask.taskState.get() == DataFrameTransformTaskState.FAILED) { - 
logger.debug("Schedule was triggered for transform [{}] but task is failed. Ignoring trigger.", getJobId()); + logger.debug("[{}] schedule was triggered for transform but task is failed. Ignoring trigger.", getJobId()); return false; } // ignore trigger if indexer is running, prevents log spam in A2P indexer IndexerState indexerState = getState(); if (IndexerState.INDEXING.equals(indexerState) || IndexerState.STOPPING.equals(indexerState)) { - logger.debug("Indexer for transform [{}] has state [{}], ignoring trigger", getJobId(), indexerState); + logger.debug("[{}] indexer for transform has state [{}]. Ignoring trigger.", getJobId(), indexerState); return false; } @@ -873,7 +883,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S indexerState = IndexerState.STOPPED; auditor.info(transformConfig.getId(), "Data frame finished indexing all data, initiating stop"); - logger.info("Data frame [{}] finished indexing all data, initiating stop", transformConfig.getId()); + logger.info("[{}] data frame transform finished indexing all data, initiating stop.", transformConfig.getId()); } final DataFrameTransformState state = new DataFrameTransformState( @@ -883,7 +893,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S transformTask.currentCheckpoint.get(), transformTask.stateReason.get(), getProgress()); - logger.debug("Updating persistent state of transform [{}] to [{}]", transformConfig.getId(), state.toString()); + logger.debug("[{}] updating persistent state of transform to [{}].", transformConfig.getId(), state.toString()); // Persist the current state and stats in the internal index. The interval of this method being // called is controlled by AsyncTwoPhaseIndexer#onBulkResponse which calls doSaveState every so @@ -896,10 +906,28 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S if (state.getTaskState().equals(DataFrameTransformTaskState.STOPPED)) { transformTask.shutdown(); } - next.run(); + // Only do this clean up once, if it succeeded, no reason to do the query again. 
+ if (oldStatsCleanedUp.compareAndSet(false, true)) { + transformsConfigManager.deleteOldTransformStoredDocuments(transformId, ActionListener.wrap( + nil -> { + logger.trace("[{}] deleted old transform stats and state document", transformId); + next.run(); + }, + e -> { + String msg = LoggerMessageFormat.format("[{}] failed deleting old transform configurations.", + transformId); + logger.warn(msg, e); + // If we have failed, we should attempt the clean up again later + oldStatsCleanedUp.set(false); + next.run(); + } + )); + } }, statsExc -> { - logger.error("Updating stats of transform [" + transformConfig.getId() + "] failed", statsExc); + logger.error(new ParameterizedMessage("[{}] updating stats of transform failed.", + transformConfig.getId()), + statsExc); auditor.warning(getJobId(), "Failure updating stats of transform: " + statsExc.getMessage()); // for auto stop shutdown the task @@ -923,7 +951,9 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } handleFailure(exc); } catch (Exception e) { - logger.error("Data frame transform encountered an unexpected internal exception: " ,e); + logger.error( + new ParameterizedMessage("[{}] data frame transform encountered an unexpected internal exception: ", transformId), + e); } } @@ -948,7 +978,6 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S if (progress != null && progress.getPercentComplete() != null && progress.getPercentComplete() < 100.0) { progress.incrementDocsProcessed(progress.getTotalDocs() - progress.getDocumentsProcessed()); } - logger.info("Last checkpoint for {} {}", getJobId(), Strings.toString(lastCheckpoint)); // If the last checkpoint is now greater than 1, that means that we have just processed the first // continuous checkpoint and should start recording the exponential averages if (lastCheckpoint != null && lastCheckpoint.getCheckpoint() > 1) { @@ -968,7 +997,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S "Finished indexing for data frame transform checkpoint [" + checkpoint + "]."); } logger.debug( - "Finished indexing for data frame transform [" + transformTask.getTransformId() + "] checkpoint [" + checkpoint + "]"); + "[{}] finished indexing for data frame transform checkpoint [{}].", getJobId(), checkpoint); auditBulkFailures = true; listener.onResponse(null); } catch (Exception e) { @@ -990,7 +1019,10 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S if (++logCount % logEvery != 0) { return false; } - int log10Checkpoint = (int) Math.floor(Math.log10(completedCheckpoint + 1)); + if (completedCheckpoint == 0) { + return true; + } + int log10Checkpoint = (int) Math.floor(Math.log10(completedCheckpoint)); logEvery = log10Checkpoint >= 3 ? 
1_000 : (int)Math.pow(10.0, log10Checkpoint); logCount = 0; return true; @@ -999,13 +1031,13 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override protected void onStop() { auditor.info(transformConfig.getId(), "Data frame transform has stopped."); - logger.info("Data frame transform [{}] has stopped", transformConfig.getId()); + logger.info("[{}] data frame transform has stopped.", transformConfig.getId()); } @Override protected void onAbort() { auditor.info(transformConfig.getId(), "Received abort request, stopping data frame transform."); - logger.info("Data frame transform [" + transformConfig.getId() + "] received abort request, stopping indexer"); + logger.info("[{}] data frame transform received abort request. Stopping indexer.", transformConfig.getId()); transformTask.shutdown(); } @@ -1015,11 +1047,17 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S checkpoint -> transformsConfigManager.putTransformCheckpoint(checkpoint, ActionListener.wrap( putCheckPointResponse -> listener.onResponse(checkpoint), - createCheckpointException -> - listener.onFailure(new RuntimeException("Failed to create checkpoint", createCheckpointException)) + createCheckpointException -> { + logger.warn(new ParameterizedMessage("[{}] failed to create checkpoint.", transformId), + createCheckpointException); + listener.onFailure(new RuntimeException("Failed to create checkpoint", createCheckpointException)); + } )), - getCheckPointException -> - listener.onFailure(new RuntimeException("Failed to retrieve checkpoint", getCheckPointException)) + getCheckPointException -> { + logger.warn(new ParameterizedMessage("[{}] failed to retrieve checkpoint.", transformId), + getCheckPointException); + listener.onFailure(new RuntimeException("Failed to retrieve checkpoint", getCheckPointException)); + } )); } @@ -1028,12 +1066,14 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S checkpointProvider.sourceHasChanged(getLastCheckpoint(), ActionListener.wrap( hasChanged -> { - logger.trace("[{}] change detected [{}]", transformId, hasChanged); + logger.trace("[{}] change detected [{}].", transformId, hasChanged); hasChangedListener.onResponse(hasChanged); }, e -> { logger.warn( - "Failed to detect changes for data frame transform [" + transformId + "], skipping update till next check.", + new ParameterizedMessage( + "[{}] failed to detect changes for data frame transform. Skipping update till next check.", + transformId), e); auditor.warning(transformId, "Failed to detect changes for data frame transform, skipping update till next check. 
Exception: " @@ -1049,7 +1089,9 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } synchronized void handleFailure(Exception e) { - logger.warn("Data frame transform [" + transformTask.getTransformId() + "] encountered an exception: ", e); + logger.warn(new ParameterizedMessage("[{}] data frame transform encountered an exception: ", + transformTask.getTransformId()), + e); if (handleCircuitBreakingException(e)) { return; } @@ -1064,7 +1106,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S @Override protected void failIndexer(String failureMessage) { - logger.error("Data frame transform [" + getJobId() + "]: " + failureMessage); + logger.error("[{}] transform has failed; experienced: [{}].", getJobId(), failureMessage); auditor.error(transformTask.getTransformId(), failureMessage); transformTask.markAsFailed(failureMessage, ActionListener.wrap( r -> { diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java index 2c2ad5ba0b3..34c16ebc9e7 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/DataFrameSingleNodeTestCase.java @@ -29,8 +29,8 @@ public abstract class DataFrameSingleNodeTestCase extends ESSingleNodeTestCase { public void waitForTemplates() throws Exception { assertBusy(() -> { ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertTrue("Timed out waiting for the data frame templates to be installed", - TemplateUtils.checkTemplateExistsAndVersionIsGTECurrentVersion(DataFrameInternalIndex.INDEX_TEMPLATE_NAME, state)); + assertTrue("Timed out waiting for the data frame templates to be installed", TemplateUtils + .checkTemplateExistsAndVersionIsGTECurrentVersion(DataFrameInternalIndex.LATEST_INDEX_VERSIONED_NAME, state)); }); } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java index 45c792f8d11..e403d102adf 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/persistence/DataFrameTransformsConfigManagerTests.java @@ -8,7 +8,16 @@ package org.elasticsearch.xpack.dataframe.persistence; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import 
org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformCheckpoint; @@ -27,6 +36,9 @@ import java.util.Comparator; import java.util.List; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex.mappings; +import static org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager.TO_XCONTENT_PARAMS; +import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -278,4 +290,71 @@ public class DataFrameTransformsConfigManagerTests extends DataFrameSingleNodeTe expectedDocs.sort(Comparator.comparing(DataFrameTransformStoredDoc::getId)); assertAsync(listener -> transformsConfigManager.getTransformStoredDoc(ids, listener), expectedDocs, null, null); } + + public void testDeleteOldTransformConfigurations() throws Exception { + String oldIndex = DataFrameInternalIndex.INDEX_PATTERN + "1"; + String transformId = "transform_test_delete_old_configurations"; + String docId = DataFrameTransformConfig.documentId(transformId); + DataFrameTransformConfig transformConfig = DataFrameTransformConfigTests + .randomDataFrameTransformConfig("transform_test_delete_old_configurations"); + client().admin().indices().create(new CreateIndexRequest(oldIndex) + .mapping(MapperService.SINGLE_MAPPING_NAME, mappings())).actionGet(); + + try(XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder source = transformConfig.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); + IndexRequest request = new IndexRequest(oldIndex) + .source(source) + .id(docId) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + client().index(request).actionGet(); + } + + assertAsync(listener -> transformsConfigManager.putTransformConfiguration(transformConfig, listener), true, null, null); + + assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(true)); + assertThat(client().get(new GetRequest(DataFrameInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); + + assertAsync(listener -> transformsConfigManager.deleteOldTransformConfigurations(transformId, listener), true, null, null); + + client().admin().indices().refresh(new RefreshRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN)).actionGet(); + assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(false)); + assertThat(client().get(new GetRequest(DataFrameInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); + } + + public void testDeleteOldTransformStoredDocuments() throws Exception { + String oldIndex = DataFrameInternalIndex.INDEX_PATTERN + "1"; + String transformId = "transform_test_delete_old_stored_documents"; + String docId = DataFrameTransformStoredDoc.documentId(transformId); + DataFrameTransformStoredDoc dataFrameTransformStoredDoc = DataFrameTransformStoredDocTests + .randomDataFrameTransformStoredDoc(transformId); + client().admin().indices().create(new CreateIndexRequest(oldIndex) + .mapping(MapperService.SINGLE_MAPPING_NAME, mappings())).actionGet(); + + try(XContentBuilder builder = XContentFactory.jsonBuilder()) { + XContentBuilder source = dataFrameTransformStoredDoc.toXContent(builder, new ToXContent.MapParams(TO_XCONTENT_PARAMS)); + IndexRequest request = new IndexRequest(oldIndex) + .source(source) + .id(docId); + client().index(request).actionGet(); + } + + assertAsync(listener -> 
transformsConfigManager.putOrUpdateTransformStoredDoc(dataFrameTransformStoredDoc, listener), + true, + null, + null); + + client().admin().indices().refresh(new RefreshRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN)).actionGet(); + + assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(true)); + assertThat(client().get(new GetRequest(DataFrameInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); + + assertAsync(listener -> transformsConfigManager.deleteOldTransformStoredDocuments(transformId, listener), + true, + null, + null); + + client().admin().indices().refresh(new RefreshRequest(DataFrameInternalIndex.INDEX_NAME_PATTERN)).actionGet(); + assertThat(client().get(new GetRequest(oldIndex).id(docId)).actionGet().isExists(), is(false)); + assertThat(client().get(new GetRequest(DataFrameInternalIndex.LATEST_INDEX_NAME).id(docId)).actionGet().isExists(), is(true)); + } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java index 2090e75ab45..4a23a57efcc 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/ClientDataFrameIndexerTests.java @@ -74,30 +74,30 @@ public class ClientDataFrameIndexerTests extends ESTestCase { // Audit every checkpoint for the first 10 assertTrue(shouldAudit.get(0)); assertTrue(shouldAudit.get(1)); - assertTrue(shouldAudit.get(9)); + assertTrue(shouldAudit.get(10)); // Then audit every 10 while < 100 - assertFalse(shouldAudit.get(10)); assertFalse(shouldAudit.get(11)); - assertTrue(shouldAudit.get(19)); - assertTrue(shouldAudit.get(29)); - assertFalse(shouldAudit.get(30)); - assertTrue(shouldAudit.get(99)); + assertTrue(shouldAudit.get(20)); + assertFalse(shouldAudit.get(29)); + assertTrue(shouldAudit.get(30)); + assertFalse(shouldAudit.get(99)); // Then audit every 100 < 1000 - assertFalse(shouldAudit.get(100)); + assertTrue(shouldAudit.get(100)); assertFalse(shouldAudit.get(109)); assertFalse(shouldAudit.get(110)); - assertTrue(shouldAudit.get(199)); + assertFalse(shouldAudit.get(199)); // Then audit every 1000 for the rest of time - assertTrue(shouldAudit.get(1999)); + assertFalse(shouldAudit.get(1999)); assertFalse(shouldAudit.get(2199)); - assertTrue(shouldAudit.get(2999)); - assertTrue(shouldAudit.get(9999)); - assertTrue(shouldAudit.get(10_999)); - assertFalse(shouldAudit.get(11_000)); - assertTrue(shouldAudit.get(11_999)); + assertTrue(shouldAudit.get(3000)); + assertTrue(shouldAudit.get(10_000)); + assertFalse(shouldAudit.get(10_999)); + assertTrue(shouldAudit.get(11_000)); + assertFalse(shouldAudit.get(11_001)); + assertFalse(shouldAudit.get(11_999)); } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java index 25ef6e43cbe..ee30609e1a5 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java +++ 
b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutorTests.java @@ -135,7 +135,7 @@ public class DataFrameTransformPersistentTasksExecutorTests extends ESTestCase { metaData = new MetaData.Builder(cs.metaData()); routingTable = new RoutingTable.Builder(cs.routingTable()); - String indexToRemove = DataFrameInternalIndex.INDEX_NAME; + String indexToRemove = DataFrameInternalIndex.LATEST_INDEX_NAME; if (randomBoolean()) { routingTable.remove(indexToRemove); } else { @@ -158,7 +158,7 @@ public class DataFrameTransformPersistentTasksExecutorTests extends ESTestCase { private void addIndices(MetaData.Builder metaData, RoutingTable.Builder routingTable) { List indices = new ArrayList<>(); indices.add(DataFrameInternalIndex.AUDIT_INDEX); - indices.add(DataFrameInternalIndex.INDEX_NAME); + indices.add(DataFrameInternalIndex.LATEST_INDEX_NAME); for (String indexName : indices) { IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); indexMetaData.settings(Settings.builder() diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java index 194c412ffe2..dfb344f829d 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationChecks.java @@ -49,7 +49,8 @@ public class DeprecationChecks { IndexDeprecationChecks::oldIndicesCheck, IndexDeprecationChecks::tooManyFieldsCheck, IndexDeprecationChecks::chainedMultiFieldsCheck, - IndexDeprecationChecks::deprecatedDateTimeFormat + IndexDeprecationChecks::deprecatedDateTimeFormat, + IndexDeprecationChecks::translogRetentionSettingCheck )); static List> ML_SETTINGS_CHECKS = diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java index ead1b665ba7..38a0d0ad5cc 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecks.java @@ -231,4 +231,19 @@ public class IndexDeprecationChecks { return fields; } + + static DeprecationIssue translogRetentionSettingCheck(IndexMetaData indexMetaData) { + final boolean softDeletesEnabled = IndexSettings.INDEX_SOFT_DELETES_SETTING.get(indexMetaData.getSettings()); + if (softDeletesEnabled) { + if (IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.exists(indexMetaData.getSettings()) + || IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.exists(indexMetaData.getSettings())) { + return new DeprecationIssue(DeprecationIssue.Level.WARNING, + "translog retention settings are ignored", + "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-translog.html", + "translog retention settings [index.translog.retention.size] and [index.translog.retention.age] are ignored " + + "because translog is no longer used in peer recoveries with soft-deletes enabled (default in 7.0 or later)"); + } + } + return null; + } } diff --git a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java 
b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java index 0655da1db08..e32e24aeafb 100644 --- a/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java +++ b/x-pack/plugin/deprecation/src/test/java/org/elasticsearch/xpack/deprecation/IndexDeprecationChecksTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.joda.JodaDeprecationPatterns; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.deprecation.DeprecationIssue; @@ -27,6 +28,8 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS; import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.collection.IsIterableContainingInOrder.contains; public class IndexDeprecationChecksTests extends ESTestCase { public void testOldIndicesCheck() { @@ -382,4 +385,31 @@ public class IndexDeprecationChecksTests extends ESTestCase { } mappingBuilder.endObject(); } + + public void testTranslogRetentionSettings() { + Settings.Builder settings = settings(Version.CURRENT); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); + IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); + assertThat(issues, contains( + new DeprecationIssue(DeprecationIssue.Level.WARNING, + "translog retention settings are ignored", + "https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-translog.html", + "translog retention settings [index.translog.retention.size] and [index.translog.retention.age] are ignored " + + "because translog is no longer used in peer recoveries with soft-deletes enabled (default in 7.0 or later)") + )); + } + + public void testDefaultTranslogRetentionSettings() { + Settings.Builder settings = settings(Version.CURRENT); + if (randomBoolean()) { + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomPositiveTimeValue()); + settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), between(1, 1024) + "b"); + settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); + } + IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(settings).numberOfShards(1).numberOfReplicas(0).build(); + List issues = DeprecationChecks.filterChecks(INDEX_SETTINGS_CHECKS, c -> c.apply(indexMetaData)); + assertThat(issues, empty()); + } } diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 3e93062cf15..812a3800527 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ 
b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -411,7 +411,7 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { public void testTranslogStats() throws Exception { final String indexName = "test"; - createIndex(indexName, Settings.builder() + IndexService indexService = createIndex(indexName, Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build()); @@ -420,7 +420,6 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { for (long i = 0; i < nbDocs; i++) { final IndexResponse indexResponse = client().prepareIndex(indexName, "_doc", Long.toString(i)).setSource("field", i).get(); assertThat(indexResponse.status(), is(RestStatus.CREATED)); - if (rarely()) { client().admin().indices().prepareFlush(indexName).get(); uncommittedOps = 0; @@ -431,7 +430,8 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { IndicesStatsResponse stats = client().admin().indices().prepareStats(indexName).clear().setTranslog(true).get(); assertThat(stats.getIndex(indexName), notNullValue()); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo( + indexService.getIndexSettings().isSoftDeleteEnabled() ? uncommittedOps : nbDocs)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(uncommittedOps)); assertAcked(new XPackClient(client()).freeze(new FreezeRequest(indexName))); @@ -440,7 +440,8 @@ public class FrozenIndexTests extends ESSingleNodeTestCase { IndicesOptions indicesOptions = IndicesOptions.STRICT_EXPAND_OPEN_CLOSED; stats = client().admin().indices().prepareStats(indexName).setIndicesOptions(indicesOptions).clear().setTranslog(true).get(); assertThat(stats.getIndex(indexName), notNullValue()); - assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), equalTo(nbDocs)); + assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().estimatedNumberOfOperations(), + equalTo(indexService.getIndexSettings().isSoftDeleteEnabled() ? 
0 : nbDocs)); assertThat(stats.getIndex(indexName).getPrimaries().getTranslog().getUncommittedOperations(), equalTo(0)); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index fdc4762e7e3..25ee9351780 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -164,8 +164,8 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { getClock(), System::currentTimeMillis, xContentRegistry)); SnapshotLifecycleTemplateRegistry templateRegistry = new SnapshotLifecycleTemplateRegistry(settings, clusterService, threadPool, client, xContentRegistry); - snapshotHistoryStore.set(new SnapshotHistoryStore(settings, new OriginSettingClient(client, INDEX_LIFECYCLE_ORIGIN), - getClock().getZone())); + snapshotHistoryStore.set(new SnapshotHistoryStore(settings, new OriginSettingClient(client, INDEX_LIFECYCLE_ORIGIN), clusterService + )); snapshotLifecycleService.set(new SnapshotLifecycleService(settings, () -> new SnapshotLifecycleTask(client, clusterService, snapshotHistoryStore.get()), clusterService, getClock())); return Arrays.asList(indexLifecycleInitialisationService.get(), snapshotLifecycleService.get(), snapshotHistoryStore.get()); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index 8ba12e433fb..65897c7e1ee 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -232,7 +232,7 @@ public class SnapshotLifecycleTaskTests extends ESTestCase { Consumer verifier; public VerifyingHistoryStore(Client client, ZoneId timeZone, Consumer verifier) { - super(Settings.EMPTY, client, timeZone); + super(Settings.EMPTY, client, null); this.verifier = verifier; } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java index 66bbe908fd0..114437a01c8 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.xpack.core.ml.integration.MlRestTestStateCleaner; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; import org.elasticsearch.xpack.ml.MachineLearning; import org.junit.After; @@ -35,6 +36,7 @@ import java.util.regex.Pattern; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.not; public class MlJobIT extends ESRestTestCase { @@ -413,6 +415,55 @@ public class MlJobIT extends 
ESRestTestCase { client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))); } + public void testDeleteJob_TimingStatsDocumentIsDeleted() throws Exception { + String jobId = "delete-job-with-timing-stats-document-job"; + String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; + createFarequoteJob(jobId); + + assertThat( + EntityUtils.toString(client().performRequest(new Request("GET", indexName + "/_count")).getEntity()), + containsString("\"count\":0")); // documents related to the job do not exist yet + + Response openResponse = + client().performRequest(new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_open")); + assertThat(entityAsMap(openResponse), hasEntry("opened", true)); + + Request postDataRequest = new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_data"); + postDataRequest.setJsonEntity("{ \"airline\":\"LOT\", \"response_time\":100, \"time\":\"2019-07-01 00:00:00Z\" }"); + client().performRequest(postDataRequest); + postDataRequest.setJsonEntity("{ \"airline\":\"LOT\", \"response_time\":100, \"time\":\"2019-07-01 02:00:00Z\" }"); + client().performRequest(postDataRequest); + + Response flushResponse = + client().performRequest(new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_flush")); + assertThat(entityAsMap(flushResponse), hasEntry("flushed", true)); + + Response closeResponse = + client().performRequest(new Request("POST", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_close")); + assertThat(entityAsMap(closeResponse), hasEntry("closed", true)); + + String timingStatsDoc = + EntityUtils.toString( + client().performRequest(new Request("GET", indexName + "/_doc/" + TimingStats.documentId(jobId))).getEntity()); + assertThat(timingStatsDoc, containsString("\"bucket_count\":2")); // TimingStats doc exists, 2 buckets have been processed + + client().performRequest(new Request("DELETE", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId)); + + waitUntilIndexIsEmpty(indexName); // when job is being deleted, it also deletes all related documents from the shared index + + // check that the TimingStats documents got deleted + ResponseException exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("GET", indexName + "/_doc/" + TimingStats.documentId(jobId)))); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + + // check that the job itself is gone + exception = expectThrows( + ResponseException.class, + () -> client().performRequest(new Request("GET", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats"))); + assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + public void testDeleteJobAsync() throws Exception { String jobId = "delete-job-async-job"; String indexName = AnomalyDetectorsIndexFields.RESULTS_INDEX_PREFIX + AnomalyDetectorsIndexFields.RESULTS_INDEX_DEFAULT; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java index d6c15275c6e..e06feb4d6aa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSet.java @@ -24,10 +24,13 @@ import 
org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; +import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; @@ -157,6 +160,7 @@ public class MachineLearningFeatureSet implements XPackFeatureSet { private final boolean enabled; private Map jobsUsage; private Map datafeedsUsage; + private Map analyticsUsage; private int nodeCount; public Retriever(Client client, JobManagerHolder jobManagerHolder, boolean available, boolean enabled, int nodeCount) { @@ -166,6 +170,7 @@ public class MachineLearningFeatureSet implements XPackFeatureSet { this.enabled = enabled; this.jobsUsage = new LinkedHashMap<>(); this.datafeedsUsage = new LinkedHashMap<>(); + this.analyticsUsage = new LinkedHashMap<>(); this.nodeCount = nodeCount; } @@ -173,19 +178,39 @@ public class MachineLearningFeatureSet implements XPackFeatureSet { // empty holder means either ML disabled or transport client mode if (jobManagerHolder.isEmpty()) { listener.onResponse( - new MachineLearningFeatureSetUsage(available, enabled, Collections.emptyMap(), Collections.emptyMap(), 0)); + new MachineLearningFeatureSetUsage(available, + enabled, + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + 0)); return; } + // Step 3. Extract usage from data frame analytics and return usage response + ActionListener dataframeAnalyticsListener = ActionListener.wrap( + response -> { + addDataFrameAnalyticsUsage(response, analyticsUsage); + listener.onResponse(new MachineLearningFeatureSetUsage(available, + enabled, + jobsUsage, + datafeedsUsage, + analyticsUsage, + nodeCount)); + }, + listener::onFailure + ); + // Step 2. Extract usage from datafeeds stats and return usage response ActionListener datafeedStatsListener = - ActionListener.wrap(response -> { - addDatafeedsUsage(response); - listener.onResponse(new MachineLearningFeatureSetUsage( - available, enabled, jobsUsage, datafeedsUsage, nodeCount)); - }, - listener::onFailure - ); + ActionListener.wrap(response -> { + addDatafeedsUsage(response); + GetDataFrameAnalyticsStatsAction.Request dataframeAnalyticsStatsRequest = + new GetDataFrameAnalyticsStatsAction.Request(GetDatafeedsStatsAction.ALL); + dataframeAnalyticsStatsRequest.setPageParams(new PageParams(0, 10_000)); + client.execute(GetDataFrameAnalyticsStatsAction.INSTANCE, dataframeAnalyticsStatsRequest, dataframeAnalyticsListener); + }, + listener::onFailure); // Step 1. 
Extract usage from jobs stats and then request stats for all datafeeds GetJobsStatsAction.Request jobStatsRequest = new GetJobsStatsAction.Request(MetaData.ALL); @@ -283,17 +308,31 @@ public class MachineLearningFeatureSet implements XPackFeatureSet { ds -> Counter.newCounter()).addAndGet(1); } - datafeedsUsage.put(MachineLearningFeatureSetUsage.ALL, createDatafeedUsageEntry(response.getResponse().count())); + datafeedsUsage.put(MachineLearningFeatureSetUsage.ALL, createCountUsageEntry(response.getResponse().count())); for (DatafeedState datafeedState : datafeedCountByState.keySet()) { datafeedsUsage.put(datafeedState.name().toLowerCase(Locale.ROOT), - createDatafeedUsageEntry(datafeedCountByState.get(datafeedState).get())); + createCountUsageEntry(datafeedCountByState.get(datafeedState).get())); } } - private Map<String, Object> createDatafeedUsageEntry(long count) { + private Map<String, Object> createCountUsageEntry(long count) { Map<String, Object> usage = new HashMap<>(); usage.put(MachineLearningFeatureSetUsage.COUNT, count); return usage; } + + private void addDataFrameAnalyticsUsage(GetDataFrameAnalyticsStatsAction.Response response, + Map<String, Object> dataframeAnalyticsUsage) { + Map<DataFrameAnalyticsState, Counter> dataFrameAnalyticsStateCounterMap = new HashMap<>(); + + for (GetDataFrameAnalyticsStatsAction.Response.Stats stats : response.getResponse().results()) { + dataFrameAnalyticsStateCounterMap.computeIfAbsent(stats.getState(), ds -> Counter.newCounter()).addAndGet(1); + } + dataframeAnalyticsUsage.put(MachineLearningFeatureSetUsage.ALL, createCountUsageEntry(response.getResponse().count())); + for (DataFrameAnalyticsState state : dataFrameAnalyticsStateCounterMap.keySet()) { + dataframeAnalyticsUsage.put(state.name().toLowerCase(Locale.ROOT), + createCountUsageEntry(dataFrameAnalyticsStateCounterMap.get(state).get())); + } + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java index bb7365cd538..2ca09af7d33 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportEvaluateDataFrameAction.java @@ -40,7 +40,7 @@ public class TransportEvaluateDataFrameAction extends HandledTransportAction listener) { Evaluation evaluation = request.getEvaluation(); SearchRequest searchRequest = new SearchRequest(request.getIndices()); - searchRequest.source(evaluation.buildSearch()); + searchRequest.source(evaluation.buildSearch(request.getParsedQuery())); ActionListener<List<EvaluationMetricResult>> resultsListener = ActionListener.wrap( results -> listener.onResponse(new EvaluateDataFrameAction.Response(evaluation.getName(), results)), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java index fac084c0fc8..00d8c15e418 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManager.java @@ -83,7 +83,8 @@ public class MemoryUsageEstimationProcessManager { onProcessCrash(jobId, processHolder)); processHolder.process = process; if (process.isProcessAlive() == false) { - String errorMsg = new ParameterizedMessage("[{}] Error while 
starting process", jobId).getFormattedMessage(); + String errorMsg = + new ParameterizedMessage("[{}] Error while starting process: {}", jobId, process.readError()).getFormattedMessage(); throw ExceptionsHelper.serverError(errorMsg); } try { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java index ca076050578..8f2ed47794a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningFeatureSetTests.java @@ -33,10 +33,13 @@ import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.action.util.QueryPage; import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage; import org.elasticsearch.xpack.core.ml.MachineLearningField; +import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction; +import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; import org.elasticsearch.xpack.core.ml.job.config.DataDescription; import org.elasticsearch.xpack.core.ml.job.config.Detector; @@ -94,6 +97,7 @@ public class MachineLearningFeatureSetTests extends ESTestCase { when(clusterService.state()).thenReturn(clusterState); givenJobs(Collections.emptyList(), Collections.emptyList()); givenDatafeeds(Collections.emptyList()); + givenDataFrameAnalytics(Collections.emptyList()); } public void testIsRunningOnMlPlatform() { @@ -171,6 +175,11 @@ public class MachineLearningFeatureSetTests extends ESTestCase { buildDatafeedStats(DatafeedState.STARTED), buildDatafeedStats(DatafeedState.STOPPED) )); + givenDataFrameAnalytics(Arrays.asList( + buildDataFrameAnalyticsStats(DataFrameAnalyticsState.STOPPED), + buildDataFrameAnalyticsStats(DataFrameAnalyticsState.STOPPED), + buildDataFrameAnalyticsStats(DataFrameAnalyticsState.STARTED) + )); MachineLearningFeatureSet featureSet = new MachineLearningFeatureSet(TestEnvironment.newEnvironment(settings.build()), clusterService, client, licenseState, jobManagerHolder); @@ -237,6 +246,10 @@ public class MachineLearningFeatureSetTests extends ESTestCase { assertThat(source.getValue("datafeeds.started.count"), equalTo(2)); assertThat(source.getValue("datafeeds.stopped.count"), equalTo(1)); + assertThat(source.getValue("data_frame_analytics_jobs._all.count"), equalTo(3)); + assertThat(source.getValue("data_frame_analytics_jobs.started.count"), equalTo(1)); + assertThat(source.getValue("data_frame_analytics_jobs.stopped.count"), equalTo(2)); + assertThat(source.getValue("jobs._all.forecasts.total"), equalTo(11)); assertThat(source.getValue("jobs._all.forecasts.forecasted_jobs"), equalTo(2)); @@ -418,6 +431,19 @@ public class MachineLearningFeatureSetTests extends ESTestCase { }).when(client).execute(same(GetDatafeedsStatsAction.INSTANCE), any(), any()); } + private void givenDataFrameAnalytics(List dataFrameAnalyticsStats) { + doAnswer(invocationOnMock -> { + @SuppressWarnings("unchecked") + ActionListener listener = + (ActionListener) 
invocationOnMock.getArguments()[2]; + listener.onResponse(new GetDataFrameAnalyticsStatsAction.Response( + new QueryPage<>(dataFrameAnalyticsStats, + dataFrameAnalyticsStats.size(), + GetDataFrameAnalyticsAction.Response.RESULTS_FIELD))); + return Void.TYPE; + }).when(client).execute(same(GetDataFrameAnalyticsStatsAction.INSTANCE), any(), any()); + } + private static Detector buildMinDetector(String fieldName) { Detector.Builder detectorBuilder = new Detector.Builder(); detectorBuilder.setFunction("min"); @@ -458,6 +484,12 @@ public class MachineLearningFeatureSetTests extends ESTestCase { return stats; } + private static GetDataFrameAnalyticsStatsAction.Response.Stats buildDataFrameAnalyticsStats(DataFrameAnalyticsState state) { + GetDataFrameAnalyticsStatsAction.Response.Stats stats = mock(GetDataFrameAnalyticsStatsAction.Response.Stats.class); + when(stats.getState()).thenReturn(state); + return stats; + } + private static ForecastStats buildForecastStats(long numberOfForecasts) { return new ForecastStatsTests().createForecastStats(numberOfForecasts, numberOfForecasts); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java index 82532ca430f..5a647c8178b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/MemoryUsageEstimationProcessManagerTests.java @@ -95,6 +95,7 @@ public class MemoryUsageEstimationProcessManagerTests extends ESTestCase { public void testRunJob_ProcessNotAlive() { when(process.isProcessAlive()).thenReturn(false); + when(process.readError()).thenReturn("Error from inside the process"); processManager.runJobAsync(TASK_ID, dataFrameAnalyticsConfig, dataExtractorFactory, listener); @@ -103,8 +104,10 @@ public class MemoryUsageEstimationProcessManagerTests extends ESTestCase { assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); assertThat(exception.getMessage(), containsString(TASK_ID)); assertThat(exception.getMessage(), containsString("Error while starting process")); + assertThat(exception.getMessage(), containsString("Error from inside the process")); verify(process).isProcessAlive(); + verify(process).readError(); verifyNoMoreInteractions(process, listener); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index ecf7781708e..7d36fe76481 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.rollup.job; import org.apache.lucene.search.TotalHits; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequest; @@ -51,7 +50,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.spy; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/45770") public class RollupIndexerStateTests extends ESTestCase { private static class 
EmptyRollupIndexer extends RollupIndexer { EmptyRollupIndexer(Executor executor, RollupJob job, AtomicReference initialState, @@ -262,7 +260,7 @@ public class RollupIndexerStateTests extends ESTestCase { indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - ESTestCase.awaitBusy(() -> indexer.getState() == IndexerState.STARTED); + assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED))); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); assertThat(indexer.getStats().getIndexFailures(), equalTo(0L)); @@ -286,9 +284,18 @@ public class RollupIndexerStateTests extends ESTestCase { protected void onFinish(ActionListener listener) { super.onFinish(ActionListener.wrap(r -> { listener.onResponse(r); - isFinished.set(true); }, listener::onFailure)); } + + @Override + protected void doSaveState(IndexerState state, Map position, Runnable next) { + super.doSaveState(state, position, () -> { + if (state == IndexerState.STARTED) { + isFinished.set(true); + } + next.run(); + }); + } }; final CountDownLatch latch = indexer.newLatch(); indexer.start(); @@ -296,7 +303,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); - ESTestCase.awaitBusy(() -> isFinished.get()); + assertBusy(() -> assertTrue(isFinished.get())); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); @@ -309,7 +316,7 @@ public class RollupIndexerStateTests extends ESTestCase { } } - public void testStateChangeMidTrigger() throws Exception { + public void testStateChangeMidTrigger() { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); RollupIndexerJobStats stats = new RollupIndexerJobStats(); @@ -389,7 +396,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); - ESTestCase.awaitBusy(() -> aborted.get()); + assertBusy(() -> assertTrue(aborted.get())); assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(0L)); @@ -477,7 +484,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); doNextSearchLatch.countDown(); - ESTestCase.awaitBusy(() -> aborted.get()); + assertBusy(() -> assertTrue(aborted.get())); assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); @@ -501,7 +508,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); assertThat(indexer.stop(), equalTo(IndexerState.STOPPING)); latch.countDown(); - ESTestCase.awaitBusy(() -> indexer.getState() == IndexerState.STOPPED); + assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STOPPED))); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); 
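A note on the ESTestCase.awaitBusy to assertBusy conversions running through these test hunks: awaitBusy returns a boolean that these call sites discarded, so a wait that timed out was silently swallowed and the test went on to assert against whatever state it happened to see (part of the flakiness tracked in issue 45770, whose AwaitsFix annotation is removed above). assertBusy instead retries a block of assertions until it passes or the timeout elapses and then rethrows the last AssertionError, so the failure surfaces at the wait itself. A minimal sketch of the two patterns; the indexer variable is illustrative, not lifted from this diff:

    // Old pattern: the returned boolean must be checked by hand, otherwise a
    // timed-out wait is ignored and later assertions run against stale state.
    boolean started = ESTestCase.awaitBusy(() -> indexer.getState() == IndexerState.STARTED);
    assertTrue(started);

    // New pattern: the assertion block is retried until it passes or the
    // (10 second default) timeout elapses, then the last AssertionError is thrown.
    assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED)));
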
@@ -528,14 +535,14 @@ public class RollupIndexerStateTests extends ESTestCase { assertFalse(indexer.abort()); assertThat(indexer.getState(), equalTo(IndexerState.ABORTING)); latch.countDown(); - ESTestCase.awaitBusy(() -> isAborted.get()); + assertBusy(() -> assertTrue(isAborted.get())); assertFalse(indexer.abort()); } finally { executor.shutdownNow(); } } - public void testAbortStarted() throws Exception { + public void testAbortStarted() { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -582,7 +589,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertFalse(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); - ESTestCase.awaitBusy(() -> indexer.getState() == IndexerState.STARTED); + assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STARTED))); assertThat(indexer.getStats().getNumInvocations(), equalTo((long) i + 1)); assertThat(indexer.getStats().getNumPages(), equalTo((long) i + 1)); } @@ -591,7 +598,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertThat(indexer.stop(), equalTo(IndexerState.STOPPING)); assertThat(indexer.getState(), equalTo(IndexerState.STOPPING)); latch.countDown(); - ESTestCase.awaitBusy(() -> indexer.getState() == IndexerState.STOPPED); + assertBusy(() -> assertThat(indexer.getState(), equalTo(IndexerState.STOPPED))); assertTrue(indexer.abort()); } finally { executor.shutdownNow(); @@ -674,21 +681,25 @@ public class RollupIndexerStateTests extends ESTestCase { Consumer failureConsumer = e -> { assertThat(e.getMessage(), equalTo("Could not identify key in agg [foo]")); - isFinished.set(true); + }; + BiConsumer> stateCheck = (i, p) -> { + if (i == IndexerState.STARTED) { + isFinished.set(true); + } }; final ExecutorService executor = Executors.newFixedThreadPool(1); try { NonEmptyRollupIndexer indexer = new NonEmptyRollupIndexer(executor, job, state, null, - searchFunction, bulkFunction, failureConsumer); + searchFunction, bulkFunction, failureConsumer, stateCheck); final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); - ESTestCase.awaitBusy(() -> isFinished.get()); + assertBusy(() -> assertTrue(isFinished.get())); // Despite failure in bulk, we should move back to STARTED and wait to try again on next trigger assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); @@ -800,7 +811,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); - ESTestCase.awaitBusy(() -> isFinished.get()); + assertBusy(() -> assertTrue(isFinished.get())); // Despite failure in processing keys, we should continue moving to STOPPED assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); @@ -830,21 +841,25 @@ public class RollupIndexerStateTests extends ESTestCase { Consumer failureConsumer = e -> { assertThat(e.getMessage(), startsWith("Partial shards failure")); - isFinished.set(true); + }; + 
BiConsumer> stateCheck = (i, p) -> { + if (i == IndexerState.STARTED) { + isFinished.set(true); + } }; final ExecutorService executor = Executors.newFixedThreadPool(1); try { NonEmptyRollupIndexer indexer = new NonEmptyRollupIndexer(executor, job, state, null, - searchFunction, bulkFunction, failureConsumer); + searchFunction, bulkFunction, failureConsumer, stateCheck); final CountDownLatch latch = indexer.newLatch(1); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); - ESTestCase.awaitBusy(() -> isFinished.get()); + assertBusy(() -> assertTrue(isFinished.get())); // Despite failure in bulk, we should move back to STARTED and wait to try again on next trigger assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); @@ -939,14 +954,18 @@ public class RollupIndexerStateTests extends ESTestCase { Consumer failureConsumer = e -> { assertThat(e.getMessage(), equalTo("failed")); - isFinished.set(true); + }; + BiConsumer> stateCheck = (i, p) -> { + if (i == IndexerState.STARTED) { + isFinished.set(true); + } }; final ExecutorService executor = Executors.newFixedThreadPool(1); try { NonEmptyRollupIndexer indexer = new NonEmptyRollupIndexer(executor, job, state, null, - searchFunction, bulkFunction, failureConsumer) { + searchFunction, bulkFunction, failureConsumer, stateCheck) { @Override protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { nextPhase.onFailure(new RuntimeException("failed")); @@ -958,7 +977,7 @@ public class RollupIndexerStateTests extends ESTestCase { assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); latch.countDown(); - ESTestCase.awaitBusy(() -> isFinished.get()); + assertBusy(() -> assertTrue(isFinished.get())); // Despite failure in bulk, we should move back to STARTED and wait to try again on next trigger assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java index 4b30224dcd4..a100afe33aa 100644 --- a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateGenerateTool.java @@ -285,12 +285,12 @@ public class CertificateGenerateTool extends EnvironmentAwareCommand { final List errors = certInfo.validate(); if (errors.size() > 0) { hasError = true; - terminal.println(Terminal.Verbosity.SILENT, "Configuration for instance " + certInfo.name.originalName + terminal.errorPrintln(Terminal.Verbosity.SILENT, "Configuration for instance " + certInfo.name.originalName + " has invalid details"); for (String message : errors) { - terminal.println(Terminal.Verbosity.SILENT, " * " + message); + terminal.errorPrintln(Terminal.Verbosity.SILENT, " * " + message); } - terminal.println(""); + terminal.errorPrintln(""); } } if (hasError) { diff --git a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java index 435305b8a69..53e3fadf168 100644 --- 
a/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java +++ b/x-pack/plugin/security/cli/src/main/java/org/elasticsearch/xpack/security/cli/CertificateTool.java @@ -417,7 +417,7 @@ public class CertificateTool extends LoggingAwareMultiCommand { if (validationErrors.isEmpty()) { return Collections.singleton(information); } else { - validationErrors.forEach(terminal::println); + validationErrors.forEach(terminal::errorPrintln); return Collections.emptyList(); } } @@ -477,7 +477,7 @@ public class CertificateTool extends LoggingAwareMultiCommand { if (Name.isValidFilename(filename)) { return filename; } else { - terminal.println(Terminal.Verbosity.SILENT, "'" + filename + "' is not a valid filename"); + terminal.errorPrintln(Terminal.Verbosity.SILENT, "'" + filename + "' is not a valid filename"); continue; } } @@ -891,11 +891,12 @@ public class CertificateTool extends LoggingAwareMultiCommand { final List errors = certInfo.validate(); if (errors.size() > 0) { hasError = true; - terminal.println(Verbosity.SILENT, "Configuration for instance " + certInfo.name.originalName + " has invalid details"); + terminal.errorPrintln(Verbosity.SILENT, "Configuration for instance " + certInfo.name.originalName + + " has invalid details"); for (String message : errors) { - terminal.println(Verbosity.SILENT, " * " + message); + terminal.errorPrintln(Verbosity.SILENT, " * " + message); } - terminal.println(""); + terminal.errorPrintln(""); } } if (hasError) { @@ -961,7 +962,7 @@ public class CertificateTool extends LoggingAwareMultiCommand { return; } if (Files.exists(parent)) { - terminal.println(Terminal.Verbosity.SILENT, "Path " + parent + " exists, but is not a directory. Cannot write to " + path); + terminal.errorPrintln(Terminal.Verbosity.SILENT, "Path " + parent + " exists, but is not a directory. Cannot write to " + path); throw new UserException(ExitCodes.CANT_CREATE, "Cannot write to " + path); } if (terminal.promptYesNo("Directory " + parent + " does not exist. 
Do you want to create it?", true)) { diff --git a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java index 9e970ea559a..6845edbdc6b 100644 --- a/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java +++ b/x-pack/plugin/security/cli/src/test/java/org/elasticsearch/xpack/security/cli/CertificateToolTests.java @@ -242,8 +242,8 @@ public class CertificateToolTests extends ESTestCase { () -> CertificateTool.parseAndValidateFile(terminal, instanceFile)); assertThat(exception.getMessage(), containsString("invalid configuration")); assertThat(exception.getMessage(), containsString(instanceFile.toString())); - assertThat(terminal.getOutput(), containsString("THIS=not a,valid DN")); - assertThat(terminal.getOutput(), containsString("could not be converted to a valid DN")); + assertThat(terminal.getErrorOutput(), containsString("THIS=not a,valid DN")); + assertThat(terminal.getErrorOutput(), containsString("could not be converted to a valid DN")); } public void testGeneratingCsr() throws Exception { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java index 866f3722e6e..5ac81a06480 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordTool.java @@ -195,15 +195,15 @@ public class SetupPasswordTool extends LoggingAwareMultiCommand { SecureString password1 = new SecureString(terminal.readSecret("Enter password for [" + user + "]: ")); Validation.Error err = Validation.Users.validatePassword(password1); if (err != null) { - terminal.println(err.toString()); - terminal.println("Try again."); + terminal.errorPrintln(err.toString()); + terminal.errorPrintln("Try again."); password1.close(); continue; } try (SecureString password2 = new SecureString(terminal.readSecret("Reenter password for [" + user + "]: "))) { if (password1.equals(password2) == false) { - terminal.println("Passwords do not match."); - terminal.println("Try again."); + terminal.errorPrintln("Passwords do not match."); + terminal.errorPrintln("Try again."); password1.close(); continue; } @@ -302,53 +302,55 @@ public class SetupPasswordTool extends LoggingAwareMultiCommand { // keystore password is not valid if (httpCode == HttpURLConnection.HTTP_UNAUTHORIZED) { - terminal.println(""); - terminal.println("Failed to authenticate user '" + elasticUser + "' against " + route.toString()); - terminal.println("Possible causes include:"); - terminal.println(" * The password for the '" + elasticUser + "' user has already been changed on this cluster"); - terminal.println(" * Your elasticsearch node is running against a different keystore"); - terminal.println(" This tool used the keystore at " + KeyStoreWrapper.keystorePath(env.configFile())); - terminal.println(""); + terminal.errorPrintln(""); + terminal.errorPrintln("Failed to authenticate user '" + elasticUser + "' against " + route.toString()); + terminal.errorPrintln("Possible causes include:"); + terminal.errorPrintln(" * The password for the '" + elasticUser + "' user has already been changed on this cluster"); + 
terminal.errorPrintln(" * Your elasticsearch node is running against a different keystore"); + terminal.errorPrintln(" This tool used the keystore at " + KeyStoreWrapper.keystorePath(env.configFile())); + terminal.errorPrintln(""); throw new UserException(ExitCodes.CONFIG, "Failed to verify bootstrap password"); } else if (httpCode != HttpURLConnection.HTTP_OK) { - terminal.println(""); - terminal.println("Unexpected response code [" + httpCode + "] from calling GET " + route.toString()); + terminal.errorPrintln(""); + terminal.errorPrintln("Unexpected response code [" + httpCode + "] from calling GET " + route.toString()); XPackSecurityFeatureConfig xPackSecurityFeatureConfig = getXPackSecurityConfig(terminal); if (xPackSecurityFeatureConfig.isAvailable == false) { - terminal.println("It doesn't look like the X-Pack security feature is available on this Elasticsearch node."); - terminal.println("Please check if you have installed a license that allows access to X-Pack Security feature."); - terminal.println(""); + terminal.errorPrintln("It doesn't look like the X-Pack security feature is available on this Elasticsearch node."); + terminal.errorPrintln("Please check if you have installed a license that allows access to " + + "X-Pack Security feature."); + terminal.errorPrintln(""); throw new UserException(ExitCodes.CONFIG, "X-Pack Security is not available."); } if (xPackSecurityFeatureConfig.isEnabled == false) { - terminal.println("It doesn't look like the X-Pack security feature is enabled on this Elasticsearch node."); - terminal.println("Please check if you have enabled X-Pack security in your elasticsearch.yml configuration file."); - terminal.println(""); + terminal.errorPrintln("It doesn't look like the X-Pack security feature is enabled on this Elasticsearch node."); + terminal.errorPrintln("Please check if you have enabled X-Pack security in your elasticsearch.yml " + + "configuration file."); + terminal.errorPrintln(""); throw new UserException(ExitCodes.CONFIG, "X-Pack Security is disabled by configuration."); } - terminal.println("X-Pack security feature is available and enabled on this Elasticsearch node."); - terminal.println("Possible causes include:"); - terminal.println(" * The relative path of the URL is incorrect. Is there a proxy in-between?"); - terminal.println(" * The protocol (http/https) does not match the port."); - terminal.println(" * Is this really an Elasticsearch server?"); - terminal.println(""); + terminal.errorPrintln("X-Pack security feature is available and enabled on this Elasticsearch node."); + terminal.errorPrintln("Possible causes include:"); + terminal.errorPrintln(" * The relative path of the URL is incorrect. 
Is there a proxy in-between?"); + terminal.errorPrintln(" * The protocol (http/https) does not match the port."); + terminal.errorPrintln(" * Is this really an Elasticsearch server?"); + terminal.errorPrintln(""); throw new UserException(ExitCodes.CONFIG, "Unknown error"); } } catch (SSLException e) { - terminal.println(""); - terminal.println("SSL connection to " + route.toString() + " failed: " + e.getMessage()); - terminal.println("Please check the elasticsearch SSL settings under " + XPackSettings.HTTP_SSL_PREFIX); - terminal.println(Verbosity.VERBOSE, ""); - terminal.println(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); - terminal.println(""); + terminal.errorPrintln(""); + terminal.errorPrintln("SSL connection to " + route.toString() + " failed: " + e.getMessage()); + terminal.errorPrintln("Please check the elasticsearch SSL settings under " + XPackSettings.HTTP_SSL_PREFIX); + terminal.errorPrintln(Verbosity.VERBOSE, ""); + terminal.errorPrintln(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); + terminal.errorPrintln(""); throw new UserException(ExitCodes.CONFIG, "Failed to establish SSL connection to elasticsearch at " + route.toString() + ". ", e); } catch (IOException e) { - terminal.println(""); - terminal.println("Connection failure to: " + route.toString() + " failed: " + e.getMessage()); - terminal.println(Verbosity.VERBOSE, ""); - terminal.println(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); - terminal.println(""); + terminal.errorPrintln(""); + terminal.errorPrintln("Connection failure to: " + route.toString() + " failed: " + e.getMessage()); + terminal.errorPrintln(Verbosity.VERBOSE, ""); + terminal.errorPrintln(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); + terminal.errorPrintln(""); throw new UserException(ExitCodes.CONFIG, "Failed to connect to elasticsearch at " + route.toString() + ". 
Is the URL correct and elasticsearch running?", e); } @@ -361,19 +363,20 @@ public class SetupPasswordTool extends LoggingAwareMultiCommand { final HttpResponse httpResponse = client.execute("GET", route, elasticUser, elasticUserPassword, () -> null, is -> responseBuilder(is, terminal)); if (httpResponse.getHttpStatus() != HttpURLConnection.HTTP_OK) { - terminal.println(""); - terminal.println("Unexpected response code [" + httpResponse.getHttpStatus() + "] from calling GET " + route.toString()); + terminal.errorPrintln(""); + terminal.errorPrintln("Unexpected response code [" + httpResponse.getHttpStatus() + "] from calling GET " + + route.toString()); if (httpResponse.getHttpStatus() == HttpURLConnection.HTTP_BAD_REQUEST) { - terminal.println("It doesn't look like the X-Pack is available on this Elasticsearch node."); - terminal.println("Please check that you have followed all installation instructions and that this tool"); - terminal.println(" is pointing to the correct Elasticsearch server."); - terminal.println(""); + terminal.errorPrintln("It doesn't look like the X-Pack is available on this Elasticsearch node."); + terminal.errorPrintln("Please check that you have followed all installation instructions and that this tool"); + terminal.errorPrintln(" is pointing to the correct Elasticsearch server."); + terminal.errorPrintln(""); throw new UserException(ExitCodes.CONFIG, "X-Pack is not available on this Elasticsearch node."); } else { - terminal.println("* Try running this tool again."); - terminal.println("* Verify that the tool is pointing to the correct Elasticsearch server."); - terminal.println("* Check the elasticsearch logs for additional error details."); - terminal.println(""); + terminal.errorPrintln("* Try running this tool again."); + terminal.errorPrintln("* Verify that the tool is pointing to the correct Elasticsearch server."); + terminal.errorPrintln("* Check the elasticsearch logs for additional error details."); + terminal.errorPrintln(""); throw new UserException(ExitCodes.TEMP_FAILURE, "Failed to determine x-pack security feature configuration."); } } @@ -406,33 +409,34 @@ public class SetupPasswordTool extends LoggingAwareMultiCommand { final HttpResponse httpResponse = client.execute("GET", route, elasticUser, elasticUserPassword, () -> null, is -> responseBuilder(is, terminal)); if (httpResponse.getHttpStatus() != HttpURLConnection.HTTP_OK) { - terminal.println(""); - terminal.println("Failed to determine the health of the cluster running at " + url); - terminal.println("Unexpected response code [" + httpResponse.getHttpStatus() + "] from calling GET " + route.toString()); + terminal.errorPrintln(""); + terminal.errorPrintln("Failed to determine the health of the cluster running at " + url); + terminal.errorPrintln("Unexpected response code [" + httpResponse.getHttpStatus() + "] from calling GET " + + route.toString()); final String cause = getErrorCause(httpResponse); if (cause != null) { - terminal.println("Cause: " + cause); + terminal.errorPrintln("Cause: " + cause); } } else { final String clusterStatus = Objects.toString(httpResponse.getResponseBody().get("status"), ""); if (clusterStatus.isEmpty()) { - terminal.println(""); - terminal.println("Failed to determine the health of the cluster running at " + url); - terminal.println("Could not find a 'status' value at " + route.toString()); + terminal.errorPrintln(""); + terminal.errorPrintln("Failed to determine the health of the cluster running at " + url); + terminal.errorPrintln("Could not find a 'status' 
value at " + route.toString()); } else if ("red".equalsIgnoreCase(clusterStatus)) { - terminal.println(""); - terminal.println("Your cluster health is currently RED."); - terminal.println("This means that some cluster data is unavailable and your cluster is not fully functional."); + terminal.errorPrintln(""); + terminal.errorPrintln("Your cluster health is currently RED."); + terminal.errorPrintln("This means that some cluster data is unavailable and your cluster is not fully functional."); } else { // Cluster is yellow/green -> all OK return; } } - terminal.println(""); - terminal.println( + terminal.errorPrintln(""); + terminal.errorPrintln( "It is recommended that you resolve the issues with your cluster before running elasticsearch-setup-passwords."); - terminal.println("It is very likely that the password changes will fail when run against an unhealthy cluster."); - terminal.println(""); + terminal.errorPrintln("It is very likely that the password changes will fail when run against an unhealthy cluster."); + terminal.errorPrintln(""); if (shouldPrompt) { final boolean keepGoing = terminal.promptYesNo("Do you want to continue with the password setup process", false); if (keepGoing == false) { @@ -465,28 +469,28 @@ public class SetupPasswordTool extends LoggingAwareMultiCommand { } }, is -> responseBuilder(is, terminal)); if (httpResponse.getHttpStatus() != HttpURLConnection.HTTP_OK) { - terminal.println(""); - terminal.println( + terminal.errorPrintln(""); + terminal.errorPrintln( "Unexpected response code [" + httpResponse.getHttpStatus() + "] from calling PUT " + route.toString()); String cause = getErrorCause(httpResponse); if (cause != null) { - terminal.println("Cause: " + cause); - terminal.println(""); + terminal.errorPrintln("Cause: " + cause); + terminal.errorPrintln(""); } - terminal.println("Possible next steps:"); - terminal.println("* Try running this tool again."); - terminal.println("* Try running with the --verbose parameter for additional messages."); - terminal.println("* Check the elasticsearch logs for additional error details."); - terminal.println("* Use the change password API manually. "); - terminal.println(""); + terminal.errorPrintln("Possible next steps:"); + terminal.errorPrintln("* Try running this tool again."); + terminal.errorPrintln("* Try running with the --verbose parameter for additional messages."); + terminal.errorPrintln("* Check the elasticsearch logs for additional error details."); + terminal.errorPrintln("* Use the change password API manually. 
"); + terminal.errorPrintln(""); throw new UserException(ExitCodes.TEMP_FAILURE, "Failed to set password for user [" + user + "]."); } } catch (IOException e) { - terminal.println(""); - terminal.println("Connection failure to: " + route.toString() + " failed: " + e.getMessage()); - terminal.println(Verbosity.VERBOSE, ""); - terminal.println(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); - terminal.println(""); + terminal.errorPrintln(""); + terminal.errorPrintln("Connection failure to: " + route.toString() + " failed: " + e.getMessage()); + terminal.errorPrintln(Verbosity.VERBOSE, ""); + terminal.errorPrintln(Verbosity.VERBOSE, ExceptionsHelper.stackTrace(e)); + terminal.errorPrintln(""); throw new UserException(ExitCodes.TEMP_FAILURE, "Failed to set password for user [" + user + "].", e); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java index 6d51fc5df93..03bed43499d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java @@ -475,11 +475,11 @@ public class UsersTool extends LoggingAwareMultiCommand { Set knownRoles = Sets.union(FileRolesStore.parseFileForRoleNames(rolesFile, null), ReservedRolesStore.names()); Set unknownRoles = Sets.difference(Sets.newHashSet(roles), knownRoles); if (!unknownRoles.isEmpty()) { - terminal.println(String.format(Locale.ROOT, "Warning: The following roles [%s] are not in the [%s] file. Make sure the names " + - "are correct. If the names are correct and the roles were created using the API please disregard this message. " + - "Nonetheless the user will still be associated with all specified roles", + terminal.errorPrintln(String.format(Locale.ROOT, "Warning: The following roles [%s] are not in the [%s] file. " + + "Make sure the names are correct. If the names are correct and the roles were created using the API please " + + "disregard this message. Nonetheless the user will still be associated with all specified roles", Strings.collectionToCommaDelimitedString(unknownRoles), rolesFile.toAbsolutePath())); - terminal.println("Known roles: " + knownRoles.toString()); + terminal.errorPrintln("Known roles: " + knownRoles.toString()); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java index a60b2204095..68be01a2e3f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommand.java @@ -224,7 +224,7 @@ public class SamlMetadataCommand extends EnvironmentAwareCommand { if (ContactInfo.TYPES.containsKey(type)) { break; } else { - terminal.println("Type '" + type + "' is not valid. Valid values are " + terminal.errorPrintln("Type '" + type + "' is not valid. 
Valid values are " + Strings.collectionToCommaDelimitedString(ContactInfo.TYPES.keySet())); } } @@ -263,8 +263,8 @@ public class SamlMetadataCommand extends EnvironmentAwareCommand { } else { errorMessage = "Error building signing credentials from provided keyPair"; } - terminal.println(Terminal.Verbosity.SILENT, errorMessage); - terminal.println("The following errors were found:"); + terminal.errorPrintln(Terminal.Verbosity.SILENT, errorMessage); + terminal.errorPrintln("The following errors were found:"); printExceptions(terminal, e); throw new UserException(ExitCodes.CANT_CREATE, "Unable to create metadata document"); } @@ -351,15 +351,16 @@ public class SamlMetadataCommand extends EnvironmentAwareCommand { SamlUtils.validate(xmlInput, METADATA_SCHEMA); terminal.println(Terminal.Verbosity.VERBOSE, "The generated metadata file conforms to the SAML metadata schema"); } catch (SAXException e) { - terminal.println(Terminal.Verbosity.SILENT, "Error - The generated metadata file does not conform to the SAML metadata schema"); - terminal.println("While validating " + xml.toString() + " the follow errors were found:"); + terminal.errorPrintln(Terminal.Verbosity.SILENT, "Error - The generated metadata file does not conform to the " + + "SAML metadata schema"); + terminal.errorPrintln("While validating " + xml.toString() + " the following errors were found:"); printExceptions(terminal, e); throw new UserException(ExitCodes.CODE_ERROR, "Generated metadata is not valid"); } } private void printExceptions(Terminal terminal, Throwable throwable) { - terminal.println(" - " + throwable.getMessage()); + terminal.errorPrintln(" - " + throwable.getMessage()); for (Throwable sup : throwable.getSuppressed()) { printExceptions(terminal, sup); } @@ -453,10 +454,10 @@ public class SamlMetadataCommand extends EnvironmentAwareCommand { throw new UserException(ExitCodes.CONFIG, "There is no SAML realm configured in " + env.configFile()); } if (saml.size() > 1) { - terminal.println("Using configuration in " + env.configFile()); - terminal.println("Found multiple SAML realms: " + terminal.errorPrintln("Using configuration in " + env.configFile()); + terminal.errorPrintln("Found multiple SAML realms: " + saml.stream().map(Map.Entry::getKey).map(Object::toString).collect(Collectors.joining(", "))); - terminal.println("Use the -" + optionName(realmSpec) + " option to specify an explicit realm"); + terminal.errorPrintln("Use the -" + optionName(realmSpec) + " option to specify an explicit realm"); throw new UserException(ExitCodes.CONFIG, "Found multiple SAML realms, please specify one with '-" + optionName(realmSpec) + "'"); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FileAttributesChecker.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FileAttributesChecker.java index 6ef8461db82..5dd7c2b9cfb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FileAttributesChecker.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/FileAttributesChecker.java @@ -51,19 +51,19 @@ public class FileAttributesChecker { PosixFileAttributes newAttributes = view.readAttributes(); PosixFileAttributes oldAttributes = attributes[i]; if (oldAttributes.permissions().equals(newAttributes.permissions()) == false) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: The file permissions of [" + paths[i] + "] have changed " + terminal.errorPrintln(Terminal.Verbosity.SILENT, "WARNING: The 
file permissions of [" + paths[i] + "] have changed " + "from [" + PosixFilePermissions.toString(oldAttributes.permissions()) + "] " + "to [" + PosixFilePermissions.toString(newAttributes.permissions()) + "]"); - terminal.println(Terminal.Verbosity.SILENT, + terminal.errorPrintln(Terminal.Verbosity.SILENT, "Please ensure that the user account running Elasticsearch has read access to this file!"); } if (oldAttributes.owner().getName().equals(newAttributes.owner().getName()) == false) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + paths[i] + "] " + terminal.errorPrintln(Terminal.Verbosity.SILENT, "WARNING: Owner of file [" + paths[i] + "] " + "used to be [" + oldAttributes.owner().getName() + "], " + "but now is [" + newAttributes.owner().getName() + "]"); } if (oldAttributes.group().getName().equals(newAttributes.group().getName()) == false) { - terminal.println(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + paths[i] + "] " + terminal.errorPrintln(Terminal.Verbosity.SILENT, "WARNING: Group of file [" + paths[i] + "] " + "used to be [" + oldAttributes.group().getName() + "], " + "but now is [" + newAttributes.group().getName() + "]"); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java index e93950739a1..6e821720069 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/tool/SetupPasswordToolTests.java @@ -350,7 +350,7 @@ public class SetupPasswordToolTests extends CommandTestCase { fail("Should have thrown exception"); } catch (UserException e) { assertEquals(ExitCodes.OK, e.exitCode); - assertThat(terminal.getOutput(), Matchers.containsString("Your cluster health is currently RED.")); + assertThat(terminal.getErrorOutput(), Matchers.containsString("Your cluster health is currently RED.")); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java index 367921ad763..734ea0be0d4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlMetadataCommandTests.java @@ -165,9 +165,9 @@ public class SamlMetadataCommandTests extends SamlTestCase { final UserException userException = expectThrows(UserException.class, () -> command.buildEntityDescriptor(terminal, options, env)); assertThat(userException.getMessage(), containsString("multiple SAML realms")); - assertThat(terminal.getOutput(), containsString("saml_a")); - assertThat(terminal.getOutput(), containsString("saml_b")); - assertThat(terminal.getOutput(), containsString("Use the -realm option")); + assertThat(terminal.getErrorOutput(), containsString("saml_a")); + assertThat(terminal.getErrorOutput(), containsString("saml_b")); + assertThat(terminal.getErrorOutput(), containsString("Use the -realm option")); } public void testSpecifyRealmNameAsParameter() throws Exception { @@ -423,7 +423,7 @@ public class SamlMetadataCommandTests extends SamlTestCase { final UserException userException = 
expectThrows(UserException.class, () -> command.possiblySignDescriptor(terminal, options, descriptor, env)); assertThat(userException.getMessage(), containsString("Unable to create metadata document")); - assertThat(terminal.getOutput(), containsString("Error parsing Private Key from")); + assertThat(terminal.getErrorOutput(), containsString("Error parsing Private Key from")); } public void testSigningMetadataWithPem() throws Exception { diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java index 313d0cdb5cf..aaf028181a1 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/SqlSecurityTestCase.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.client.Request; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -97,14 +96,14 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase { } return Paths.get(auditLogFileString); } - + @SuppressForbidden(reason="security doesn't work with mock filesystem") private static Path lookupRolledOverAuditLog() { String auditLogFileString = System.getProperty("tests.audit.yesterday.logfile"); if (null == auditLogFileString) { throw new IllegalStateException("tests.audit.yesterday.logfile must be set to run this test. It should be automatically " + "set by gradle."); - } + } return Paths.get(auditLogFileString); } @@ -120,7 +119,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase { * How much of the audit log was written before the test started. */ private static long auditLogWrittenBeforeTestStart; - + /** * If the audit log file rolled over. This is a rare case possible only at midnight. */ @@ -188,7 +187,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase { } catch (IOException e) { throw new RuntimeException(e); } - + // The log file can roll over without being caught by assertLogs() method: in those tests where exceptions are being handled // and no audit logs being read (and, thus, assertLogs() is not called) - for example testNoMonitorMain() method: there are no // calls to auditLogs(), and the method could run while the audit file is rolled over. 
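For context on the cleanup change in the hunk below: the subclass-local delete-and-catch logic in wipeIndicesAfterTests() is replaced by the shared wipeAllIndices() helper from ESRestTestCase. The removed code, restated as a self-contained sketch, shows the behavior such a helper centralizes; this is the replaced logic, not necessarily the helper's exact implementation:

    // Delete every index, tolerating the 404 that simply means no indices existed.
    private static void wipeAllIndicesSketch() throws IOException {
        try {
            adminClient().performRequest(new Request("DELETE", "*"));
        } catch (ResponseException e) {
            // 404 here just means we had no indexes
            if (e.getResponse().getStatusLine().getStatusCode() != 404) {
                throw e;
            }
        }
    }
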
@@ -205,12 +204,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase { @AfterClass public static void wipeIndicesAfterTests() throws IOException { try { - adminClient().performRequest(new Request("DELETE", "*")); - } catch (ResponseException e) { - // 404 here just means we had no indexes - if (e.getResponse().getStatusLine().getStatusCode() != 404) { - throw e; - } + wipeAllIndices(); } finally { // Clear the static state so other subclasses can reuse it later oneTimeSetup = false; @@ -586,7 +580,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase { if (sm != null) { sm.checkPermission(new SpecialPermission()); } - + BufferedReader[] logReaders = new BufferedReader[2]; AccessController.doPrivileged((PrivilegedAction) () -> { try { @@ -604,7 +598,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase { throw new RuntimeException(e); } }); - + // The "index" is used as a way of reading from both rolled over file and current audit file in order: rolled over file // first, then the audit log file. Very rarely we will read from the rolled over file: when the test happened to run // at midnight and the audit file rolled over during the test. diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml index e73c7793022..b189fba7c79 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/20_stats.yml @@ -10,8 +10,8 @@ setup: --- "Translog stats on frozen indices": - skip: - version: " - 7.2.99" - reason: "frozen indices have translog stats starting version 7.3.0" + version: " - 7.3.99" + reason: "start ignoring translog retention policy with soft-deletes enabled in 7.4" - do: index: @@ -47,7 +47,7 @@ setup: - do: indices.stats: metric: [ translog ] - - match: { indices.test.primaries.translog.operations: 3 } + - match: { indices.test.primaries.translog.operations: 0 } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } # unfreeze index @@ -60,5 +60,5 @@ setup: - do: indices.stats: metric: [ translog ] - - match: { indices.test.primaries.translog.operations: 3 } + - match: { indices.test.primaries.translog.operations: 0 } - match: { indices.test.primaries.translog.uncommitted_operations: 0 } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml index a4d3c1f1979..7459e695901 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/evaluate_data_frame.yml @@ -5,6 +5,7 @@ setup: index: utopia body: > { + "dataset": "blue", "is_outlier": false, "is_outlier_int": 0, "outlier_score": 0.0, @@ -19,6 +20,7 @@ setup: index: utopia body: > { + "dataset": "blue", "is_outlier": false, "is_outlier_int": 0, "outlier_score": 0.2, @@ -33,6 +35,7 @@ setup: index: utopia body: > { + "dataset": "blue", "is_outlier": false, "is_outlier_int": 0, "outlier_score": 0.3, @@ -47,6 +50,7 @@ setup: index: utopia body: > { + "dataset": "blue", "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.3, @@ -61,6 +65,7 @@ setup: index: utopia body: > { + "dataset": "green", "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.4, @@ -75,6 +80,7 @@ setup: index: utopia body: > { + "dataset": "green", "is_outlier": true, 
"is_outlier_int": 1, "outlier_score": 0.5, @@ -89,6 +95,7 @@ setup: index: utopia body: > { + "dataset": "green", "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.9, @@ -103,6 +110,7 @@ setup: index: utopia body: > { + "dataset": "green", "is_outlier": true, "is_outlier_int": 1, "outlier_score": 0.95, @@ -305,6 +313,33 @@ setup: tn: 3 fn: 2 +--- +"Test binary_soft_classification with query": + - do: + ml.evaluate_data_frame: + body: > + { + "index": "utopia", + "query": { "bool": { "filter": { "term": { "dataset": "blue" } } } }, + "evaluation": { + "binary_soft_classification": { + "actual_field": "is_outlier", + "predicted_probability_field": "outlier_score", + "metrics": { + "confusion_matrix": { "at": [0.5] } + } + } + } + } + - match: + binary_soft_classification: + confusion_matrix: + '0.5': + tp: 0 + fp: 0 + tn: 3 + fn: 1 + --- "Test binary_soft_classification default metrics": - do: diff --git a/x-pack/plugin/watcher/build.gradle b/x-pack/plugin/watcher/build.gradle index bfd447adc26..3aaee650c85 100644 --- a/x-pack/plugin/watcher/build.gradle +++ b/x-pack/plugin/watcher/build.gradle @@ -67,6 +67,10 @@ thirdPartyAudit { ) } +forbiddenPatterns { + exclude '**/*.p12' +} + // pulled in as external dependency to work on java 9 rootProject.globalInfo.ready { if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index ee4ebec0b0b..9cd1c811c92 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -269,11 +269,12 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin, Reloa new WatcherIndexTemplateRegistry(environment.settings(), clusterService, threadPool, client, xContentRegistry); + final SSLService sslService = getSslService(); // http client - httpClient = new HttpClient(settings, getSslService(), cryptoService, clusterService); + httpClient = new HttpClient(settings, sslService, cryptoService, clusterService); // notification - EmailService emailService = new EmailService(settings, cryptoService, clusterService.getClusterSettings()); + EmailService emailService = new EmailService(settings, cryptoService, sslService, clusterService.getClusterSettings()); JiraService jiraService = new JiraService(settings, httpClient, clusterService.getClusterSettings()); SlackService slackService = new SlackService(settings, httpClient, clusterService.getClusterSettings()); PagerDutyService pagerDutyService = new PagerDutyService(settings, httpClient, clusterService.getClusterSettings()); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java index fca29821bfa..f1dd7be1965 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.watcher; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -25,6 +27,7 @@ import 
org.elasticsearch.xpack.watcher.watch.WatchStoreUtils; import java.util.Collections; import java.util.Comparator; +import java.util.EnumSet; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; @@ -35,10 +38,12 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; public class WatcherLifeCycleService implements ClusterStateListener { + private static final Logger logger = LogManager.getLogger(WatcherLifeCycleService.class); private final AtomicReference state = new AtomicReference<>(WatcherState.STARTED); private final AtomicReference> previousShardRoutings = new AtomicReference<>(Collections.emptyList()); private volatile boolean shutDown = false; // indicates that the node has been shutdown and we should never start watcher after this. private volatile WatcherService watcherService; + private final EnumSet stopStates = EnumSet.of(WatcherState.STOPPED, WatcherState.STOPPING); WatcherLifeCycleService(ClusterService clusterService, WatcherService watcherService) { this.watcherService = watcherService; @@ -57,8 +62,10 @@ public class WatcherLifeCycleService implements ClusterStateListener { this.state.set(WatcherState.STOPPING); shutDown = true; clearAllocationIds(); - watcherService.shutDown(); - this.state.set(WatcherState.STOPPED); + watcherService.shutDown(() -> { + this.state.set(WatcherState.STOPPED); + logger.info("watcher has stopped and shutdown"); + }); } /** @@ -88,9 +95,10 @@ public class WatcherLifeCycleService implements ClusterStateListener { } boolean isWatcherStoppedManually = isWatcherStoppedManually(event.state()); + boolean isStoppedOrStopping = stopStates.contains(this.state.get()); // if this is not a data node, we need to start it ourselves possibly if (event.state().nodes().getLocalNode().isDataNode() == false && - isWatcherStoppedManually == false && this.state.get() == WatcherState.STOPPED) { + isWatcherStoppedManually == false && isStoppedOrStopping) { this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED)); return; @@ -99,8 +107,20 @@ public class WatcherLifeCycleService implements ClusterStateListener { if (isWatcherStoppedManually) { if (this.state.get() == WatcherState.STARTED) { clearAllocationIds(); - watcherService.stop("watcher manually marked to shutdown by cluster state update"); - this.state.set(WatcherState.STOPPED); + boolean stopping = this.state.compareAndSet(WatcherState.STARTED, WatcherState.STOPPING); + if (stopping) { + //waiting to set state to stopped until after all currently running watches are finished + watcherService.stop("watcher manually marked to shutdown by cluster state update", () -> { + //only transition from stopping -> stopped (which may not be the case if restarted quickly) + boolean stopped = state.compareAndSet(WatcherState.STOPPING, WatcherState.STOPPED); + if (stopped) { + logger.info("watcher has stopped"); + } else { + logger.info("watcher has not been stopped. 
not currently in a stopping state, current state [{}]", state.get()); + } + + }); + } } return; } @@ -142,7 +162,7 @@ public class WatcherLifeCycleService implements ClusterStateListener { previousShardRoutings.set(localAffectedShardRoutings); if (state.get() == WatcherState.STARTED) { watcherService.reload(event.state(), "new local watcher shard allocation ids"); - } else if (state.get() == WatcherState.STOPPED) { + } else if (isStoppedOrStopping) { this.state.set(WatcherState.STARTING); watcherService.start(event.state(), () -> this.state.set(WatcherState.STARTED)); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java index c96203bd642..32031e78f5e 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherService.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.upgrade.UpgradeField; +import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.execution.TriggeredWatchStoreField; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.execution.ExecutionService; @@ -144,24 +145,29 @@ public class WatcherService { } /** - * Stops the watcher service and marks its services as paused + * Stops the watcher service and marks its services as paused. Callers should set the Watcher state to {@link WatcherState#STOPPING} + * prior to calling this method. + * + * @param stoppedListener The listener that will set Watcher state to: {@link WatcherState#STOPPED}, may not be {@code null} */ - public void stop(String reason) { + public void stop(String reason, Runnable stoppedListener) { + assert stoppedListener != null; logger.info("stopping watch service, reason [{}]", reason); - executionService.pause(); + executionService.pause(stoppedListener); triggerService.pauseExecution(); } /** * shuts down the trigger service as well to make sure there are no lingering threads - * also no need to check anything, as this is final, we just can go to status STOPPED + * + * @param stoppedListener The listener that will set Watcher state to: {@link WatcherState#STOPPED}, may not be {@code null} */ - void shutDown() { + void shutDown(Runnable stoppedListener) { + assert stoppedListener != null; logger.info("stopping watch service, reason [shutdown initiated]"); - executionService.pause(); + executionService.pause(stoppedListener); triggerService.stop(); stopExecutor(); - logger.debug("watch service has stopped"); } void stopExecutor() { @@ -185,7 +191,7 @@ public class WatcherService { processedClusterStateVersion.set(state.getVersion()); triggerService.pauseExecution(); - int cancelledTaskCount = executionService.clearExecutionsAndQueue(); + int cancelledTaskCount = executionService.clearExecutionsAndQueue(() -> {}); logger.info("reloading watcher, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); executor.execute(wrapWatcherService(() -> reloadInner(state, reason, false), @@ -256,7 +262,7 @@ public class WatcherService { */ public void pauseExecution(String reason) { triggerService.pauseExecution(); - int cancelledTaskCount = executionService.pause(); + int cancelledTaskCount = 
executionService.pause(() -> {}); logger.info("paused watch execution, reason [{}], cancelled [{}] queued tasks", reason, cancelledTaskCount); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java index 95ac8030036..9e76cbcffca 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/execution/CurrentExecutions.java @@ -5,8 +5,11 @@ */ package org.elasticsearch.xpack.watcher.execution; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.watcher.WatcherState; import java.util.Iterator; import java.util.concurrent.ConcurrentHashMap; @@ -19,6 +22,7 @@ import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalSta public final class CurrentExecutions implements Iterable { + private static final Logger logger = LogManager.getLogger(CurrentExecutions.class); private final ConcurrentMap currentExecutions = new ConcurrentHashMap<>(); // the condition of the lock is used to wait and signal the finishing of all executions on shutdown private final ReentrantLock lock = new ReentrantLock(); @@ -63,9 +67,12 @@ public final class CurrentExecutions implements Iterable()); - this.clearExecutions(); + this.clearExecutions(stoppedListener); return cancelledTaskCount; } @@ -280,8 +289,10 @@ public class ExecutionService { ctx.setNodeId(clusterService.localNode().getId()); WatchRecord record = null; final String watchId = ctx.id().watchId(); + //pull this to a local reference since the class reference can be swapped, and need to ensure same object is used for put/remove + final CurrentExecutions currentExecutions = this.currentExecutions.get(); try { - boolean executionAlreadyExists = currentExecutions.get().put(watchId, new WatchExecution(ctx, Thread.currentThread())); + boolean executionAlreadyExists = currentExecutions.put(watchId, new WatchExecution(ctx, Thread.currentThread())); if (executionAlreadyExists) { logger.trace("not executing watch [{}] because it is already queued", watchId); record = ctx.abortBeforeExecution(ExecutionState.NOT_EXECUTED_ALREADY_QUEUED, "Watch is already queued in thread pool"); @@ -336,7 +347,7 @@ public class ExecutionService { triggeredWatchStore.delete(ctx.id()); } - currentExecutions.get().remove(watchId); + currentExecutions.remove(watchId); logger.debug("finished [{}]/[{}]", watchId, ctx.id()); } return record; @@ -580,11 +591,15 @@ public class ExecutionService { /** * This clears out the current executions and sets new empty current executions * This is needed, because when this method is called, watcher keeps running, so sealing executions would be a bad idea + * + * @param stoppedListener The listener that will set Watcher state to: {@link WatcherState#STOPPED}, may be a no-op assuming the + * {@link WatcherState#STOPPED} is set elsewhere or not needed to be set. 
*/ - private void clearExecutions() { + private void clearExecutions(Runnable stoppedListener) { + assert stoppedListener != null; final CurrentExecutions currentExecutionsBeforeSetting = currentExecutions.getAndSet(new CurrentExecutions()); // clear old executions in background, no need to wait - genericExecutor.execute(() -> currentExecutionsBeforeSetting.sealAndAwaitEmpty(maxStopTimeout)); + genericExecutor.execute(() -> currentExecutionsBeforeSetting.sealAndAwaitEmpty(maxStopTimeout, stoppedListener)); } // the watch execution task takes another runnable as parameter diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java index c6c041a6571..083390c98b9 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/NotificationService.java @@ -95,7 +95,7 @@ public abstract class NotificationService { final Settings completeSettings = completeSettingsBuilder.build(); // obtain account names and create accounts final Set accountNames = getAccountNames(completeSettings); - this.accounts = createAccounts(completeSettings, accountNames, this::createAccount); + this.accounts = createAccounts(completeSettings, accountNames, (name, accountSettings) -> createAccount(name, accountSettings)); this.defaultAccount = findDefaultAccountOrNull(completeSettings, this.accounts); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java index b6a6e259ecc..2079b2bbfb6 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Account.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.watcher.notification.email; import org.apache.logging.log4j.Logger; import org.elasticsearch.SpecialPermission; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -22,6 +23,8 @@ import javax.mail.Session; import javax.mail.Transport; import javax.mail.internet.InternetAddress; import javax.mail.internet.MimeMessage; +import javax.net.SocketFactory; +import javax.net.ssl.SSLSocketFactory; import java.security.AccessController; import java.security.PrivilegedAction; import java.security.PrivilegedActionException; @@ -184,7 +187,7 @@ public class Account { final Smtp smtp; final EmailDefaults defaults; - Config(String name, Settings settings) { + Config(String name, Settings settings, @Nullable SSLSocketFactory sslSocketFactory) { this.name = name; profile = Profile.resolve(settings.get("profile"), Profile.STANDARD); defaults = new EmailDefaults(name, settings.getAsSettings("email_defaults")); @@ -193,6 +196,9 @@ public class Account { String msg = "missing required email account setting for account [" + name + "]. 
'smtp.host' must be configured"; throw new SettingsException(msg); } + if (sslSocketFactory != null) { + smtp.setSocketFactory(sslSocketFactory); + } } public Session createSession() { @@ -220,7 +226,7 @@ public class Account { /** * Finds a setting, and then a secure setting if the setting is null, or returns null if one does not exist. This differs * from other getSetting calls in that it allows for null whereas the other methods throw an exception. - * + *

* Note: if your setting was not previously secure, than the string reference that is in the setting object is still * insecure. This is only constructing a new SecureString with the char[] of the insecure setting. */ @@ -274,6 +280,10 @@ public class Account { settings.put(newKey, TimeValue.parseTimeValue(value, currentKey).millis()); } } + + public void setSocketFactory(SocketFactory socketFactory) { + this.properties.put("mail.smtp.ssl.socketFactory", socketFactory); + } } /** diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java index de7161dcdd1..5b9705b9f38 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/EmailService.java @@ -15,15 +15,20 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ssl.SSLConfiguration; +import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; +import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.watcher.crypto.CryptoService; import org.elasticsearch.xpack.watcher.notification.NotificationService; import javax.mail.MessagingException; - +import javax.net.ssl.SSLSocketFactory; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import static org.elasticsearch.xpack.core.watcher.WatcherField.EMAIL_NOTIFICATION_SSL_PREFIX; + /** * A component to store email credentials and handle sending email notifications. 
*/ @@ -101,13 +106,17 @@ public class EmailService extends NotificationService { Setting.affixKeySetting("xpack.notification.email.account.", "smtp.wait_on_quit", (key) -> Setting.boolSetting(key, true, Property.Dynamic, Property.NodeScope)); + private static final SSLConfigurationSettings SSL_SETTINGS = SSLConfigurationSettings.withPrefix(EMAIL_NOTIFICATION_SSL_PREFIX); + private static final Logger logger = LogManager.getLogger(EmailService.class); private final CryptoService cryptoService; + private final SSLService sslService; - public EmailService(Settings settings, @Nullable CryptoService cryptoService, ClusterSettings clusterSettings) { + public EmailService(Settings settings, @Nullable CryptoService cryptoService, SSLService sslService, ClusterSettings clusterSettings) { super("email", settings, clusterSettings, EmailService.getDynamicSettings(), EmailService.getSecureSettings()); this.cryptoService = cryptoService; + this.sslService = sslService; // ensure logging of setting changes clusterSettings.addSettingsUpdateConsumer(SETTING_DEFAULT_ACCOUNT, (s) -> {}); clusterSettings.addAffixUpdateConsumer(SETTING_PROFILE, (s, o) -> {}, (s, o) -> {}); @@ -132,10 +141,19 @@ public class EmailService extends NotificationService { @Override protected Account createAccount(String name, Settings accountSettings) { - Account.Config config = new Account.Config(name, accountSettings); + Account.Config config = new Account.Config(name, accountSettings, getSmtpSslSocketFactory()); return new Account(config, cryptoService, logger); } + @Nullable + private SSLSocketFactory getSmtpSslSocketFactory() { + final SSLConfiguration sslConfiguration = sslService.getSSLConfiguration(EMAIL_NOTIFICATION_SSL_PREFIX); + if (sslConfiguration == null) { + return null; + } + return sslService.sslSocketFactory(sslConfiguration); + } + public EmailSent send(Email email, Authentication auth, Profile profile, String accountName) throws MessagingException { Account account = getAccount(accountName); if (account == null) { @@ -189,6 +207,7 @@ public class EmailService extends NotificationService { public static List> getSettings() { List> allSettings = new ArrayList>(EmailService.getDynamicSettings()); allSettings.addAll(EmailService.getSecureSettings()); + allSettings.addAll(SSL_SETTINGS.getAllSettings()); return allSettings; } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java index 548583ac14b..cf6c2c5ac66 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.core.watcher.WatcherMetaData; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.junit.Before; +import org.mockito.ArgumentCaptor; import org.mockito.stubbing.Answer; import java.util.Collections; @@ -133,8 +134,8 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { when(watcherService.validate(clusterState)).thenReturn(true); lifeCycleService.shutDown(); - verify(watcherService, never()).stop(anyString()); - verify(watcherService, times(1)).shutDown(); + verify(watcherService, never()).stop(anyString(), any()); + verify(watcherService, times(1)).shutDown(any()); reset(watcherService); 
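The next hunk captures the Runnable passed to stop() and asserts the STOPPING to STOPPED transition that WatcherLifeCycleService now performs with compareAndSet. As a standalone illustration of that guarded state machine (a minimal sketch with illustrative names, not the plugin's classes):

```java
import java.util.concurrent.atomic.AtomicReference;

// Minimal sketch, not the plugin's code: stop() moves STARTED -> STOPPING
// immediately, while the stopped callback only moves STOPPING -> STOPPED.
// A quick restart (STOPPING -> STARTING) therefore cannot be clobbered by a
// callback that fires late, which is the race the new code guards against.
class WatcherStateSketch {
    enum State { STARTED, STOPPING, STOPPED, STARTING }

    private final AtomicReference<State> state = new AtomicReference<>(State.STARTED);

    void stop(Runnable asyncStop) {
        if (state.compareAndSet(State.STARTED, State.STOPPING)) {
            asyncStop.run(); // the real service drains running watches, then notifies
        }
    }

    void onStopped() {
        // no-op unless still stopping; a restart may have won the race
        state.compareAndSet(State.STOPPING, State.STOPPED);
    }

    State current() {
        return state.get();
    }
}
```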
lifeCycleService.clusterChanged(new ClusterChangedEvent("any", clusterState, clusterState)); @@ -175,7 +176,12 @@ public class WatcherLifeCycleServiceTests extends ESTestCase { .build(); lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", stoppedClusterState, clusterState)); - verify(watcherService, times(1)).stop(eq("watcher manually marked to shutdown by cluster state update")); + ArgumentCaptor captor = ArgumentCaptor.forClass(Runnable.class); + verify(watcherService, times(1)) + .stop(eq("watcher manually marked to shutdown by cluster state update"), captor.capture()); + assertEquals(WatcherState.STOPPING, lifeCycleService.getState()); + captor.getValue().run(); + assertEquals(WatcherState.STOPPED, lifeCycleService.getState()); // Starting via cluster state update, as the watcher metadata block is removed/set to true reset(watcherService); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index f4ee831266b..e67512ee694 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -269,8 +269,8 @@ public class WatcherServiceTests extends ESTestCase { csBuilder.metaData(MetaData.builder()); service.reload(csBuilder.build(), "whatever"); - verify(executionService).clearExecutionsAndQueue(); - verify(executionService, never()).pause(); + verify(executionService).clearExecutionsAndQueue(any()); + verify(executionService, never()).pause(any()); verify(triggerService).pauseExecution(); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java index 495ac99fb9e..a7d9862fd7a 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailMessageIdTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.watch.Payload; import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; @@ -30,6 +31,7 @@ import java.util.List; import java.util.Set; import static org.hamcrest.Matchers.hasSize; +import static org.mockito.Mockito.mock; public class EmailMessageIdTests extends ESTestCase { @@ -56,7 +58,7 @@ public class EmailMessageIdTests extends ESTestCase { Set> registeredSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); registeredSettings.addAll(EmailService.getSettings()); ClusterSettings clusterSettings = new ClusterSettings(settings, registeredSettings); - emailService = new EmailService(settings, null, clusterSettings); + emailService = new EmailService(settings, null, mock(SSLService.class), clusterSettings); EmailTemplate emailTemplate = EmailTemplate.builder().from("from@example.org").to("to@example.org") .subject("subject").textBody("body").build(); emailAction = new EmailAction(emailTemplate, null, null, 
null, null, null); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailSslTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailSslTests.java new file mode 100644 index 00000000000..c4b0b657b9d --- /dev/null +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/email/EmailSslTests.java @@ -0,0 +1,148 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.watcher.actions.email; + +import org.apache.http.ssl.SSLContextBuilder; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; +import org.elasticsearch.xpack.core.watcher.watch.Payload; +import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine; +import org.elasticsearch.xpack.watcher.notification.email.EmailService; +import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate; +import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer; +import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer; +import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine; +import org.elasticsearch.xpack.watcher.test.WatcherTestUtils; +import org.hamcrest.Matchers; +import org.junit.After; +import org.junit.Before; + +import javax.mail.MessagingException; +import javax.mail.internet.MimeMessage; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLException; +import java.io.IOException; +import java.io.InputStream; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.hamcrest.Matchers.hasSize; + +public class EmailSslTests extends ESTestCase { + + private EmailServer server; + private TextTemplateEngine textTemplateEngine = new MockTextTemplateEngine(); + private HtmlSanitizer htmlSanitizer = new HtmlSanitizer(Settings.EMPTY); + + @Before + public void startSmtpServer() throws GeneralSecurityException, IOException { + final KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + final char[] keystorePassword = "test-smtp".toCharArray(); + try (InputStream is = getDataInputStream("test-smtp.p12")) { + keyStore.load(is, keystorePassword); + } + final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, keystorePassword).build(); + server = EmailServer.localhost(logger, sslContext); + } + + @After + public void stopSmtpServer() { + server.stop(); + } + + public void testFailureSendingMessageToSmtpServerWithUntrustedCertificateAuthority() throws Exception { + final Settings.Builder settings = Settings.builder(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + final ExecutableEmailAction emailAction = buildEmailAction(settings, secureSettings); + final WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(); 
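The tests in this new file exercise the xpack.notification.email.ssl.* settings end to end, but the mechanism under test is compact: the SSLSocketFactory resolved from those settings reaches JavaMail through a session property, as in Account.Smtp#setSocketFactory above. A minimal standalone sketch of that handoff, independent of the SSLService wiring (names here are illustrative):

```java
import java.util.Properties;
import javax.mail.Session;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;

// Sketch only: JavaMail accepts an object-valued "mail.smtp.ssl.socketFactory"
// property, so whatever trust material the factory carries (truststore,
// verification mode) governs the SMTP TLS handshake.
public final class SmtpSslSketch {
    static Session smtpsSession(SSLSocketFactory factory) {
        Properties props = new Properties();
        props.put("mail.smtp.ssl.enable", "true");
        props.put("mail.smtp.ssl.socketFactory", factory);
        return Session.getInstance(props);
    }

    public static void main(String[] args) throws Exception {
        SSLSocketFactory factory = SSLContext.getDefault().getSocketFactory();
        Session session = smtpsSession(factory);
        System.out.println(session.getProperty("mail.smtp.ssl.enable"));
    }
}
```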
+ final MessagingException exception = expectThrows(MessagingException.class, + () -> emailAction.execute("my_action_id", ctx, Payload.EMPTY)); + final List allCauses = getAllCauses(exception); + assertThat(allCauses, Matchers.hasItem(Matchers.instanceOf(SSLException.class))); + } + + public void testCanSendMessageToSmtpServerUsingTrustStore() throws Exception { + List messages = new ArrayList<>(); + server.addListener(messages::add); + try { + final Settings.Builder settings = Settings.builder() + .put("xpack.notification.email.ssl.truststore.path", getDataPath("test-smtp.p12")); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("xpack.notification.email.ssl.truststore.secure_password", "test-smtp"); + + ExecutableEmailAction emailAction = buildEmailAction(settings, secureSettings); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(); + emailAction.execute("my_action_id", ctx, Payload.EMPTY); + + assertThat(messages, hasSize(1)); + } finally { + server.clearListeners(); + } + } + + public void testCanSendMessageToSmtpServerByDisablingVerification() throws Exception { + List messages = new ArrayList<>(); + server.addListener(messages::add); + try { + final Settings.Builder settings = Settings.builder().put("xpack.notification.email.ssl.verification_mode", "none"); + final MockSecureSettings secureSettings = new MockSecureSettings(); + ExecutableEmailAction emailAction = buildEmailAction(settings, secureSettings); + + WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(); + emailAction.execute("my_action_id", ctx, Payload.EMPTY); + + assertThat(messages, hasSize(1)); + } finally { + server.clearListeners(); + } + } + + private ExecutableEmailAction buildEmailAction(Settings.Builder baseSettings, MockSecureSettings secureSettings) { + secureSettings.setString("xpack.notification.email.account.test.smtp.secure_password", EmailServer.PASSWORD); + Settings settings = baseSettings + .put("path.home", createTempDir()) + .put("xpack.notification.email.account.test.smtp.auth", true) + .put("xpack.notification.email.account.test.smtp.user", EmailServer.USERNAME) + .put("xpack.notification.email.account.test.smtp.port", server.port()) + .put("xpack.notification.email.account.test.smtp.host", "localhost") + .setSecureSettings(secureSettings) + .build(); + + Set> registeredSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + registeredSettings.addAll(EmailService.getSettings()); + ClusterSettings clusterSettings = new ClusterSettings(settings, registeredSettings); + SSLService sslService = new SSLService(settings, TestEnvironment.newEnvironment(settings)); + final EmailService emailService = new EmailService(settings, null, sslService, clusterSettings); + EmailTemplate emailTemplate = EmailTemplate.builder().from("from@example.org").to("to@example.org") + .subject("subject").textBody("body").build(); + final EmailAction emailAction = new EmailAction(emailTemplate, null, null, null, null, null); + return new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine, htmlSanitizer, Collections.emptyMap()); + } + + private List getAllCauses(Exception exception) { + final List allCauses = new ArrayList<>(); + Throwable cause = exception.getCause(); + while (cause != null) { + allCauses.add(cause); + cause = cause.getCause(); + } + return allCauses; + } + +} + diff --git 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java index 0fa05e900e5..9790540f44d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/NotificationServiceTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.watcher.notification.NotificationService; import java.io.IOException; import java.io.InputStream; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountTests.java index 5e87a4305fe..38509feaca4 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountTests.java @@ -141,7 +141,7 @@ public class AccountTests extends ESTestCase { Settings settings = builder.build(); - Account.Config config = new Account.Config(accountName, settings); + Account.Config config = new Account.Config(accountName, settings, null); assertThat(config.profile, is(profile)); assertThat(config.defaults, equalTo(emailDefaults)); @@ -165,7 +165,7 @@ public class AccountTests extends ESTestCase { .put("smtp.port", server.port()) .put("smtp.user", EmailServer.USERNAME) .setSecureSettings(secureSettings) - .build()), null, logger); + .build(), null), null, logger); Email email = Email.builder() .id("_id") @@ -202,7 +202,7 @@ public class AccountTests extends ESTestCase { .put("smtp.port", server.port()) .put("smtp.user", EmailServer.USERNAME) .setSecureSettings(secureSettings) - .build()), null, logger); + .build(), null), null, logger); Email email = Email.builder() .id("_id") @@ -240,7 +240,7 @@ public class AccountTests extends ESTestCase { Account account = new Account(new Account.Config("default", Settings.builder() .put("smtp.host", "localhost") .put("smtp.port", server.port()) - .build()), null, logger); + .build(), null), null, logger); Email email = Email.builder() .id("_id") @@ -264,7 +264,7 @@ public class AccountTests extends ESTestCase { Account account = new Account(new Account.Config("default", Settings.builder() .put("smtp.host", "localhost") .put("smtp.port", server.port()) - .build()), null, logger); + .build(), null), null, logger); Properties mailProperties = account.getConfig().smtp.properties; assertThat(mailProperties.get("mail.smtp.connectiontimeout"), is(String.valueOf(TimeValue.timeValueMinutes(2).millis()))); @@ -279,7 +279,7 @@ public class AccountTests extends ESTestCase { .put("smtp.connection_timeout", TimeValue.timeValueMinutes(4)) .put("smtp.write_timeout", TimeValue.timeValueMinutes(6)) .put("smtp.timeout", TimeValue.timeValueMinutes(8)) - .build()), null, logger); + .build(), null), null, logger); Properties mailProperties = account.getConfig().smtp.properties; @@ -294,7 +294,7 @@ public class AccountTests extends ESTestCase { .put("smtp.host", "localhost") .put("smtp.port", server.port()) .put("smtp.connection_timeout", 4000) - .build()), 
null, logger); + .build(), null), null, logger); }); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountsTests.java index 7060dcab0eb..99e010faa4f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountsTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/AccountsTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ssl.SSLService; import java.util.HashSet; @@ -16,13 +17,14 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isOneOf; import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; public class AccountsTests extends ESTestCase { public void testSingleAccount() throws Exception { Settings.Builder builder = Settings.builder() .put("default_account", "account1"); addAccountSettings("account1", builder); - EmailService service = new EmailService(builder.build(), null, + EmailService service = new EmailService(builder.build(), null, mock(SSLService.class), new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); Account account = service.getAccount("account1"); assertThat(account, notNullValue()); @@ -35,7 +37,7 @@ public class AccountsTests extends ESTestCase { public void testSingleAccountNoExplicitDefault() throws Exception { Settings.Builder builder = Settings.builder(); addAccountSettings("account1", builder); - EmailService service = new EmailService(builder.build(), null, + EmailService service = new EmailService(builder.build(), null, mock(SSLService.class), new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); Account account = service.getAccount("account1"); assertThat(account, notNullValue()); @@ -51,7 +53,7 @@ public class AccountsTests extends ESTestCase { addAccountSettings("account1", builder); addAccountSettings("account2", builder); - EmailService service = new EmailService(builder.build(), null, + EmailService service = new EmailService(builder.build(), null, mock(SSLService.class), new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); Account account = service.getAccount("account1"); assertThat(account, notNullValue()); @@ -70,7 +72,7 @@ public class AccountsTests extends ESTestCase { addAccountSettings("account1", builder); addAccountSettings("account2", builder); - EmailService service = new EmailService(builder.build(), null, + EmailService service = new EmailService(builder.build(), null, mock(SSLService.class), new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); Account account = service.getAccount("account1"); assertThat(account, notNullValue()); @@ -88,13 +90,14 @@ public class AccountsTests extends ESTestCase { addAccountSettings("account1", builder); addAccountSettings("account2", builder); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())); - SettingsException e = expectThrows(SettingsException.class, () -> new EmailService(builder.build(), null, clusterSettings)); + SettingsException e = 
expectThrows(SettingsException.class, + () -> new EmailService(builder.build(), null, mock(SSLService.class), clusterSettings)); assertThat(e.getMessage(), is("could not find default account [unknown]")); } public void testNoAccount() throws Exception { Settings.Builder builder = Settings.builder(); - EmailService service = new EmailService(builder.build(), null, + EmailService service = new EmailService(builder.build(), null, mock(SSLService.class), new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); expectThrows(IllegalArgumentException.class, () -> service.getAccount(null)); } @@ -102,7 +105,8 @@ public class AccountsTests extends ESTestCase { public void testNoAccountWithDefaultAccount() throws Exception { Settings settings = Settings.builder().put("xpack.notification.email.default_account", "unknown").build(); ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings())); - SettingsException e = expectThrows(SettingsException.class, () -> new EmailService(settings, null, clusterSettings)); + SettingsException e = expectThrows(SettingsException.class, + () -> new EmailService(settings, null, mock(SSLService.class), clusterSettings)); assertThat(e.getMessage(), is("could not find default account [unknown]")); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java index 88bc500f10a..e6a61cdad52 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/EmailServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.watcher.notification.email; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.watcher.common.secret.Secret; import org.junit.Before; @@ -32,7 +33,7 @@ public class EmailServiceTests extends ESTestCase { public void init() throws Exception { account = mock(Account.class); service = new EmailService(Settings.builder().put("xpack.notification.email.account.account1.foo", "bar").build(), null, - new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))) { + mock(SSLService.class), new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))) { @Override protected Account createAccount(String name, Settings accountSettings) { return account; @@ -70,7 +71,7 @@ public class EmailServiceTests extends ESTestCase { .put("xpack.notification.email.account.account5.smtp.wait_on_quit", true) .put("xpack.notification.email.account.account5.smtp.ssl.trust", "host1,host2,host3") .build(); - EmailService emailService = new EmailService(settings, null, + EmailService emailService = new EmailService(settings, null, mock(SSLService.class), new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); Account account1 = emailService.getAccount("account1"); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/ProfileTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/ProfileTests.java index 8ab3e38550d..da8f788f94f 100644 --- 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/ProfileTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/ProfileTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.watcher.notification.email; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.ssl.SSLService; import javax.mail.BodyPart; import javax.mail.Part; @@ -19,6 +20,7 @@ import java.util.HashSet; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; public class ProfileTests extends ESTestCase { @@ -40,7 +42,7 @@ public class ProfileTests extends ESTestCase { .put("xpack.notification.email.account.foo.smtp.host", "_host") .build(); - EmailService service = new EmailService(settings, null, + EmailService service = new EmailService(settings, null, mock(SSLService.class), new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); Session session = service.getAccount("foo").getConfig().createSession(); MimeMessage mimeMessage = Profile.STANDARD.toMimeMessage(email, session); @@ -62,4 +64,4 @@ public class ProfileTests extends ESTestCase { assertThat("Expected to find an inline attachment in mime message, but didnt", foundInlineAttachment, is(true)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/support/EmailServer.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/support/EmailServer.java index 4195b251392..dc49e23ca7d 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/support/EmailServer.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/notification/email/support/EmailServer.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.watcher.notification.email.support; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Nullable; import org.subethamail.smtp.auth.EasyAuthenticationHandlerFactory; import org.subethamail.smtp.helper.SimpleMessageListener; import org.subethamail.smtp.helper.SimpleMessageListenerAdapter; @@ -14,8 +15,13 @@ import org.subethamail.smtp.server.SMTPServer; import javax.mail.MessagingException; import javax.mail.Session; import javax.mail.internet.MimeMessage; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; import java.io.IOException; import java.io.InputStream; +import java.net.InetSocketAddress; +import java.net.Socket; import java.security.AccessController; import java.security.PrivilegedAction; import java.util.List; @@ -37,8 +43,8 @@ public class EmailServer { private final List listeners = new CopyOnWriteArrayList<>(); private final SMTPServer server; - public EmailServer(String host, final Logger logger) { - server = new SMTPServer(new SimpleMessageListenerAdapter(new SimpleMessageListener() { + public EmailServer(String host, @Nullable SSLContext sslContext, final Logger logger) { + final SimpleMessageListenerAdapter listener = new SimpleMessageListenerAdapter(new SimpleMessageListener() { @Override public boolean accept(String from, String recipient) { return true; @@ -49,9 +55,9 @@ public class EmailServer { try { Session session = Session.getInstance(new Properties()); MimeMessage msg = new 
MimeMessage(session, data); - for (Listener listener : listeners) { + for (Listener listener1 : listeners) { try { - listener.on(msg); + listener1.on(msg); } catch (Exception e) { logger.error("Unexpected failure", e); fail(e.getMessage()); @@ -61,12 +67,33 @@ public class EmailServer { throw new RuntimeException("could not create mime message", me); } } - }), new EasyAuthenticationHandlerFactory((user, passwd) -> { + }); + final EasyAuthenticationHandlerFactory authentication = new EasyAuthenticationHandlerFactory((user, passwd) -> { assertThat(user, is(USERNAME)); assertThat(passwd, is(PASSWORD)); - })); + }); + server = new SMTPServer(listener, authentication) { + @Override + public SSLSocket createSSLSocket(Socket socket) throws IOException { + if (sslContext == null) { + return super.createSSLSocket(socket); + } else { + SSLSocketFactory factory = sslContext.getSocketFactory(); + InetSocketAddress remoteAddress = (InetSocketAddress) socket.getRemoteSocketAddress(); + SSLSocket sslSocket = (SSLSocket) factory.createSocket(socket, remoteAddress.getHostString(), socket.getPort(), true); + sslSocket.setUseClientMode(false); + sslSocket.setEnabledCipherSuites(sslSocket.getSupportedCipherSuites()); + return sslSocket; + } + } + }; server.setHostName(host); server.setPort(0); + if (sslContext != null) { + server.setEnableTLS(true); + server.setRequireTLS(true); + server.setHideTLS(false); + } } /** @@ -93,8 +120,16 @@ public class EmailServer { listeners.add(listener); } + public void clearListeners() { + this.listeners.clear(); + } + public static EmailServer localhost(final Logger logger) { - EmailServer server = new EmailServer("localhost", logger); + return localhost(logger, null); + } + + public static EmailServer localhost(final Logger logger, @Nullable SSLContext sslContext) { + EmailServer server = new EmailServer("localhost", sslContext, logger); server.start(); return server; } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java index 5a59ba24762..ddea3e9e0e4 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/AbstractWatcherIntegrationTestCase.java @@ -45,6 +45,7 @@ import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.xpack.core.XPackClient; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.watcher.WatcherState; import org.elasticsearch.xpack.core.watcher.client.WatcherClient; import org.elasticsearch.xpack.core.watcher.execution.ExecutionState; @@ -96,6 +97,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNot.not; +import static org.mockito.Mockito.mock; @ClusterScope(scope = SUITE, numClientNodes = 0, transportClientRatio = 0, maxNumDataNodes = 3) public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase { @@ -538,10 +540,12 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase WatcherStatsResponse watcherStatsResponse = watcherClient().prepareWatcherStats().get(); 
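EmailServer#createSSLSocket above upgrades an already accepted plaintext socket to TLS rather than binding a separate listening TLS socket. The wrapping technique in isolation (a sketch under the same assumptions as the test server):

```java
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;

// Sketch of the wrapping done in EmailServer#createSSLSocket: layer TLS over
// an accepted plaintext socket, in server mode, so the test SMTP server can
// require SMTPS without managing its own SSLServerSocket.
final class TlsUpgradeSketch {
    static SSLSocket upgrade(SSLContext sslContext, Socket accepted) throws IOException {
        SSLSocketFactory factory = sslContext.getSocketFactory();
        InetSocketAddress remote = (InetSocketAddress) accepted.getRemoteSocketAddress();
        // autoClose=true: closing the TLS socket also closes the wrapped socket
        SSLSocket ssl = (SSLSocket) factory.createSocket(
            accepted, remote.getHostString(), accepted.getPort(), true);
        ssl.setUseClientMode(false); // this end answers the handshake as the server
        return ssl;
    }
}
```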
assertThat(watcherStatsResponse.hasFailures(), is(false)); List> currentStatesFromStatsRequest = watcherStatsResponse.getNodes().stream() - .map(response -> Tuple.tuple(response.getNode().getName(), response.getWatcherState())) - .collect(Collectors.toList()); + .map(response -> Tuple.tuple(response.getNode().getName() + " (" + response.getThreadPoolQueueSize() + ")", + response.getWatcherState())).collect(Collectors.toList()); List states = currentStatesFromStatsRequest.stream().map(Tuple::v2).collect(Collectors.toList()); + + logger.info("waiting to stop watcher, current states {}", currentStatesFromStatsRequest); boolean isAllStateStarted = states.stream().allMatch(w -> w == WatcherState.STARTED); @@ -566,13 +570,14 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase } throw new AssertionError("unexpected state, retrying with next run"); - }); + }, 30, TimeUnit.SECONDS); } public static class NoopEmailService extends EmailService { public NoopEmailService() { - super(Settings.EMPTY, null, new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); + super(Settings.EMPTY, null, mock(SSLService.class), + new ClusterSettings(Settings.EMPTY, new HashSet<>(EmailService.getSettings()))); } @Override diff --git a/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.p12 b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.p12 new file mode 100644 index 00000000000..b0a748c73c3 Binary files /dev/null and b/x-pack/plugin/watcher/src/test/resources/org/elasticsearch/xpack/watcher/actions/email/test-smtp.p12 differ diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java index 2e63c56e247..371b5667c7d 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java @@ -234,7 +234,6 @@ public class CcrRollingUpgradeIT extends AbstractMultiClusterUpgradeTestCase { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/45641") public void testBiDirectionalIndexFollowing() throws Exception { logger.info("clusterName={}, upgradeState={}", clusterName, upgradeState); diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java index 09ef53052b1..47b0f4697dc 100644 --- a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/authc/file/tool/UsersToolTests.java @@ -299,7 +299,7 @@ public class UsersToolTests extends CommandTestCase { public void testParseUnknownRole() throws Exception { UsersTool.parseRoles(terminal, TestEnvironment.newEnvironment(settings), "test_r1,r2,r3"); - String output = terminal.getOutput(); + String output = terminal.getErrorOutput(); assertTrue(output, output.contains("The following roles [r2,r3] are not in the [")); } diff --git a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/FileAttributesCheckerTests.java 
b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/FileAttributesCheckerTests.java index 12ae440e8f7..af2959410fd 100644 --- a/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/FileAttributesCheckerTests.java +++ b/x-pack/qa/security-tools-tests/src/test/java/org/elasticsearch/xpack/security/support/FileAttributesCheckerTests.java @@ -28,6 +28,7 @@ public class FileAttributesCheckerTests extends ESTestCase { MockTerminal terminal = new MockTerminal(); checker.check(terminal); assertTrue(terminal.getOutput(), terminal.getOutput().isEmpty()); + assertTrue(terminal.getErrorOutput(), terminal.getErrorOutput().isEmpty()); } public void testNoPosix() throws Exception { @@ -38,6 +39,7 @@ public class FileAttributesCheckerTests extends ESTestCase { MockTerminal terminal = new MockTerminal(); checker.check(terminal); assertTrue(terminal.getOutput(), terminal.getOutput().isEmpty()); + assertTrue(terminal.getErrorOutput(), terminal.getErrorOutput().isEmpty()); } } @@ -51,6 +53,7 @@ public class FileAttributesCheckerTests extends ESTestCase { MockTerminal terminal = new MockTerminal(); checker.check(terminal); assertTrue(terminal.getOutput(), terminal.getOutput().isEmpty()); + assertTrue(terminal.getErrorOutput(), terminal.getErrorOutput().isEmpty()); } } @@ -71,7 +74,7 @@ public class FileAttributesCheckerTests extends ESTestCase { MockTerminal terminal = new MockTerminal(); checker.check(terminal); - String output = terminal.getOutput(); + String output = terminal.getErrorOutput(); assertTrue(output, output.contains("permissions of [" + path + "] have changed")); } } @@ -89,7 +92,7 @@ public class FileAttributesCheckerTests extends ESTestCase { MockTerminal terminal = new MockTerminal(); checker.check(terminal); - String output = terminal.getOutput(); + String output = terminal.getErrorOutput(); assertTrue(output, output.contains("Owner of file [" + path + "] used to be")); } } @@ -107,7 +110,7 @@ public class FileAttributesCheckerTests extends ESTestCase { MockTerminal terminal = new MockTerminal(); checker.check(terminal); - String output = terminal.getOutput(); + String output = terminal.getErrorOutput(); assertTrue(output, output.contains("Group of file [" + path + "] used to be")); } } diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java index 679bc08f01f..9ec458067dc 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityClientYamlTestSuiteIT.java @@ -19,6 +19,7 @@ import org.junit.After; import org.junit.Before; import java.util.Collections; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -108,7 +109,7 @@ public class SmokeTestWatcherWithSecurityClientYamlTestSuiteIT extends ESClientY default: throw new AssertionError("unknown state[" + state + "]"); } - }); + }, 30, TimeUnit.SECONDS); } @Override diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java 
b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java index e0da00f29d4..3bad41a1393 100644 --- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java +++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java @@ -22,6 +22,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -91,8 +92,7 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { @After public void stopWatcher() throws Exception { - adminClient().performRequest(new Request("DELETE", "/my_test_index")); - + assertBusy(() -> { try { Response statsResponse = adminClient().performRequest(new Request("GET", "/_watcher/stats")); @@ -118,7 +118,9 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase { } catch (IOException e) { throw new AssertionError(e); } - }); + }, 30, TimeUnit.SECONDS); + + adminClient().performRequest(new Request("DELETE", "/my_test_index")); } @Override diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java index 3df9512298e..b720f0620de 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherTestSuiteIT.java @@ -20,6 +20,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Map; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -92,7 +93,7 @@ public class SmokeTestWatcherTestSuiteIT extends ESRestTestCase { default: throw new AssertionError("unknown state[" + state + "]"); } - }); + }, 30, TimeUnit.SECONDS); } @Override diff --git a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java index 2dd5cc86a89..3a1155d562d 100644 --- a/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java +++ b/x-pack/qa/smoke-test-watcher/src/test/java/org/elasticsearch/smoketest/WatcherRestIT.java @@ -14,6 +14,8 @@ import org.elasticsearch.xpack.test.rest.XPackRestTestConstants; import org.junit.After; import org.junit.Before; +import java.util.concurrent.TimeUnit; + import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -90,6 +92,6 @@ public class WatcherRestIT extends ESClientYamlSuiteTestCase { default: throw new AssertionError("unknown state[" + state + "]"); } - }); + }, 30, TimeUnit.SECONDS); } } diff --git a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java index 8f8792f2697..c95c89a7ba9 100644 --- a/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java +++ 
b/x-pack/qa/third-party/jira/src/test/java/org/elasticsearch/smoketest/WatcherJiraYamlTestSuiteIT.java @@ -17,6 +17,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -70,6 +71,6 @@ public class WatcherJiraYamlTestSuiteIT extends ESClientYamlSuiteTestCase { } catch (IOException e) { throw new AssertionError(e); } - }); + }, 30, TimeUnit.SECONDS); } } diff --git a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java index b9a628f71f9..64de13f8375 100644 --- a/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java +++ b/x-pack/qa/third-party/pagerduty/src/test/java/org/elasticsearch/smoketest/WatcherPagerDutyYamlTestSuiteIT.java @@ -17,6 +17,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -70,6 +71,6 @@ public class WatcherPagerDutyYamlTestSuiteIT extends ESClientYamlSuiteTestCase { } catch (IOException e) { throw new AssertionError(e); } - }); + }, 30, TimeUnit.SECONDS); } } diff --git a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java index 01eeae442b2..a1e2938817b 100644 --- a/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java +++ b/x-pack/qa/third-party/slack/src/test/java/org/elasticsearch/smoketest/WatcherSlackYamlTestSuiteIT.java @@ -17,6 +17,7 @@ import org.junit.Before; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -70,6 +71,6 @@ public class WatcherSlackYamlTestSuiteIT extends ESClientYamlSuiteTestCase { } catch (IOException e) { throw new AssertionError(e); } - }); + }, 30, TimeUnit.SECONDS); } } diff --git a/x-pack/snapshot-tool/src/test/java/org/elasticsearch/snapshots/S3CleanupTests.java b/x-pack/snapshot-tool/src/test/java/org/elasticsearch/snapshots/S3CleanupTests.java index 3358eedf4c8..5a80103f0b5 100644 --- a/x-pack/snapshot-tool/src/test/java/org/elasticsearch/snapshots/S3CleanupTests.java +++ b/x-pack/snapshot-tool/src/test/java/org/elasticsearch/snapshots/S3CleanupTests.java @@ -155,6 +155,7 @@ public class S3CleanupTests extends ESSingleNodeTestCase { } } finally { logger.info("Cleanup command output:\n" + terminal.getOutput()); + logger.info("Cleanup command standard error:\n" + terminal.getErrorOutput()); } return terminal;
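A recurring change in the QA suites above is passing an explicit 30-second timeout to assertBusy rather than relying on the shorter default, since stopping watcher now waits for in-flight watches to drain. The shape of that helper, as an illustrative sketch (ESTestCase provides the real implementation; this hypothetical stand-in takes a plain Runnable):

```java
import java.util.concurrent.TimeUnit;

// Illustrative sketch of the assertBusy polling pattern: retry the assertion
// with capped exponential backoff until it passes or the deadline elapses.
final class AssertBusySketch {
    static void assertBusy(Runnable assertion, long timeout, TimeUnit unit) throws InterruptedException {
        final long deadline = System.nanoTime() + unit.toNanos(timeout);
        long sleepMillis = 1;
        AssertionError last;
        do {
            try {
                assertion.run();
                return; // assertion passed within the allotted time
            } catch (AssertionError e) {
                last = e;
            }
            Thread.sleep(sleepMillis);
            sleepMillis = Math.min(sleepMillis * 2, 1_000); // capped exponential backoff
        } while (System.nanoTime() < deadline);
        throw last; // surface the most recent failure once time is up
    }
}
```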