From 9ee7b3743e3f7cb77f0cdf15d1acfccd4ee238c3 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 14 Oct 2019 10:40:24 +0300 Subject: [PATCH 01/16] Add FIPS 140 mode to XPack Usage API (#47278) (#47976) This change adds support for the FIPS 140 mode feature to be retrieved via the XPack Usage API. --- .../core/security/SecurityFeatureSetUsage.java | 13 ++++++++++++- .../xpack/security/SecurityFeatureSet.java | 7 ++++++- .../xpack/security/SecurityFeatureSetTests.java | 7 +++++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index a3fc9570b02..39a2ee8681f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -27,6 +27,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { private static final String AUDIT_XFIELD = "audit"; private static final String IP_FILTER_XFIELD = "ipfilter"; private static final String ANONYMOUS_XFIELD = "anonymous"; + private static final String FIPS_140_XFIELD = "fips_140"; private Map realmsUsage; private Map rolesStoreUsage; @@ -37,6 +38,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { private Map ipFilterUsage; private Map anonymousUsage; private Map roleMappingStoreUsage; + private Map fips140Usage; public SecurityFeatureSetUsage(StreamInput in) throws IOException { super(in); @@ -55,13 +57,17 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { } anonymousUsage = in.readMap(); roleMappingStoreUsage = in.readMap(); + if (in.getVersion().onOrAfter(Version.V_7_5_0)) { + fips140Usage = in.readMap(); + } } public SecurityFeatureSetUsage(boolean available, boolean enabled, Map realmsUsage, Map rolesStoreUsage, Map roleMappingStoreUsage, Map sslUsage, Map auditUsage, Map ipFilterUsage, Map anonymousUsage, - Map tokenServiceUsage, Map apiKeyServiceUsage) { + Map tokenServiceUsage, Map apiKeyServiceUsage, + Map fips140Usage) { super(XPackField.SECURITY, available, enabled); this.realmsUsage = realmsUsage; this.rolesStoreUsage = rolesStoreUsage; @@ -72,6 +78,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { this.auditUsage = auditUsage; this.ipFilterUsage = ipFilterUsage; this.anonymousUsage = anonymousUsage; + this.fips140Usage = fips140Usage; } @Override @@ -92,6 +99,9 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { } out.writeMap(anonymousUsage); out.writeMap(roleMappingStoreUsage); + if (out.getVersion().onOrAfter(Version.V_7_5_0)) { + out.writeMap(fips140Usage); + } } @Override @@ -107,6 +117,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { builder.field(AUDIT_XFIELD, auditUsage); builder.field(IP_FILTER_XFIELD, ipFilterUsage); builder.field(ANONYMOUS_XFIELD, anonymousUsage); + builder.field(FIPS_140_XFIELD, fips140Usage); } else if (sslUsage.isEmpty() == false) { // A trial (or basic) license can have SSL without security. // This is because security defaults to disabled on that license, but that dynamic-default does not disable SSL. 
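The serialization changes above follow the usual wire-compatibility pattern: the write and the read of the new field are gated on the same version check, so a node never sends the field to, or expects it from, a peer that predates it. A minimal, self-contained sketch of the pattern, using illustrative names rather than the real org.elasticsearch stream classes:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class VersionGatedFieldDemo {
        // stand-in for Version.V_7_5_0; illustrative only
        static final int V_7_5_0 = 7050099;

        // Write the new field only when the remote node understands it.
        static void writeFips140Enabled(DataOutputStream out, int remoteVersion,
                                        boolean enabled) throws IOException {
            if (remoteVersion >= V_7_5_0) {
                out.writeBoolean(enabled);
            }
        }

        // Mirror the same version check on the read side so both ends agree
        // on the wire format; older senders simply omit the field.
        static Boolean readFips140Enabled(DataInputStream in, int remoteVersion) throws IOException {
            return remoteVersion >= V_7_5_0 ? in.readBoolean() : null;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            writeFips140Enabled(new DataOutputStream(bytes), V_7_5_0, true);
            Boolean enabled = readFips140Enabled(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())), V_7_5_0);
            System.out.println("fips_140.enabled = " + enabled); // prints: fips_140.enabled = true
        }
    }

Gating both sides on the first release that carries the field is what keeps mixed-version clusters agreeing on the stream layout during a rolling upgrade.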
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java index dde4cb692e4..345c656f6d6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java @@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.singletonMap; import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING; +import static org.elasticsearch.xpack.core.XPackSettings.FIPS_MODE_ENABLED; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; import static org.elasticsearch.xpack.core.XPackSettings.TOKEN_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.TRANSPORT_SSL_ENABLED; @@ -95,6 +96,7 @@ public class SecurityFeatureSet implements XPackFeatureSet { Map auditUsage = auditUsage(settings); Map ipFilterUsage = ipFilterUsage(ipFilter); Map anonymousUsage = singletonMap("enabled", AnonymousUser.isAnonymousEnabled(settings)); + Map fips140Usage = fips140Usage(settings); final AtomicReference> rolesUsageRef = new AtomicReference<>(); final AtomicReference> roleMappingUsageRef = new AtomicReference<>(); @@ -104,7 +106,7 @@ public class SecurityFeatureSet implements XPackFeatureSet { if (countDown.countDown()) { listener.onResponse(new SecurityFeatureSetUsage(available(), enabled(), realmsUsageRef.get(), rolesUsageRef.get(), roleMappingUsageRef.get(), sslUsage, auditUsage, ipFilterUsage, anonymousUsage, tokenServiceUsage, - apiKeyServiceUsage)); + apiKeyServiceUsage, fips140Usage)); } }; @@ -184,4 +186,7 @@ public class SecurityFeatureSet implements XPackFeatureSet { return ipFilter.usageStats(); } + static Map fips140Usage(Settings settings) { + return singletonMap("enabled", FIPS_MODE_ENABLED.get(settings)); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java index c9d8c68347a..5f3ce4a41b2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java @@ -149,6 +149,10 @@ public class SecurityFeatureSetTests extends ESTestCase { if (anonymousEnabled) { settings.put(AnonymousUser.ROLES_SETTING.getKey(), "foo"); } + final boolean fips140Enabled = randomBoolean(); + if (fips140Enabled) { + settings.put("xpack.security.fips_mode.enabled", true); + } SecurityFeatureSet featureSet = new SecurityFeatureSet(settings.build(), licenseState, realms, rolesStore, roleMappingStore, ipFilter); @@ -216,6 +220,9 @@ public class SecurityFeatureSetTests extends ESTestCase { // anonymous assertThat(source.getValue("anonymous.enabled"), is(anonymousEnabled)); + + // FIPS 140 + assertThat(source.getValue("fips_140.enabled"), is(fips140Enabled)); } else { assertThat(source.getValue("realms"), is(nullValue())); assertThat(source.getValue("ssl"), is(nullValue())); From ef02a736ca2de551267f1a3eb6328eec87510858 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 14 Oct 2019 13:06:36 +0200 Subject: [PATCH 02/16] Don't apply the plugin's reader wrapper in can_match phase (#47816) This change modifies the local execution of 
the `can_match` phase to **not** apply the plugin's reader wrapper (if it is configured) when acquiring the searcher. We must ensure that the phase runs quickly, and since we don't know the cost of applying the wrapper, it is preferable to avoid it entirely. The can_match phase can afford false positives, so it is also safe for the built-in plugins that use this functionality. Closes #46817 --- .../elasticsearch/index/shard/IndexShard.java | 9 ++++ .../elasticsearch/search/SearchService.java | 14 ++++-- .../search/SearchServiceTests.java | 50 ++++++++++++++++++- 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index db3eeda719b..6a6bdf91758 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1216,6 +1216,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl getEngine().failEngine(reason, e); } + /** + * Acquire the searcher without applying the additional reader wrapper. + */ + public Engine.Searcher acquireSearcherNoWrap(String source) { + readAllowed(); + markSearcherAccessed(); + return getEngine().acquireSearcher(source, Engine.SearcherScope.EXTERNAL); + } + public Engine.Searcher acquireSearcher(String source) { return acquireSearcher(source, Engine.SearcherScope.EXTERNAL); } diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 0b352028a75..5035fb3e554 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -1012,10 +1012,16 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv */ public boolean canMatch(ShardSearchRequest request) throws IOException { assert request.searchType() == SearchType.QUERY_THEN_FETCH : "unexpected search type: " + request.searchType(); - try (DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, false, "can_match")) { - SearchSourceBuilder source = context.request().source(); - if (canRewriteToMatchNone(source)) { - QueryBuilder queryBuilder = source.query(); + IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); + IndexShard indexShard = indexService.getShard(request.shardId().getId()); + // we don't want to use the reader wrapper since it could run costly operations + // and we can afford false positives.
+ try (Engine.Searcher searcher = indexShard.acquireSearcherNoWrap("can_match")) { + QueryShardContext context = indexService.newQueryShardContext(request.shardId().id(), searcher, + request::nowInMillis, request.getClusterAlias()); + Rewriteable.rewrite(request.getRewriteable(), context, false); + if (canRewriteToMatchNone(request.source())) { + QueryBuilder queryBuilder = request.source().query(); return queryBuilder instanceof MatchNoneQueryBuilder == false; } return true; // null query means match_all diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 0dd8d4d6e67..c09b2a80e5d 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -19,6 +19,9 @@ package org.elasticsearch.search; import com.carrotsearch.hppc.IntArrayList; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FilterDirectoryReader; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.Query; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.ElasticsearchException; @@ -76,6 +79,7 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.junit.Before; import java.io.IOException; import java.util.Collection; @@ -88,6 +92,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import static java.util.Collections.singletonList; @@ -111,7 +116,42 @@ public class SearchServiceTests extends ESSingleNodeTestCase { @Override protected Collection> getPlugins() { - return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class, InternalOrPrivateSettingsPlugin.class); + return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class, + ReaderWrapperCountPlugin.class, InternalOrPrivateSettingsPlugin.class); + } + + public static class ReaderWrapperCountPlugin extends Plugin { + @Override + public void onIndexModule(IndexModule indexModule) { + indexModule.setReaderWrapper(service -> SearchServiceTests::apply); + } + } + + @Before + private void resetCount() { + numWrapInvocations = new AtomicInteger(0); + } + + private static AtomicInteger numWrapInvocations = new AtomicInteger(0); + private static DirectoryReader apply(DirectoryReader directoryReader) throws IOException { + numWrapInvocations.incrementAndGet(); + return new FilterDirectoryReader(directoryReader, + new FilterDirectoryReader.SubReaderWrapper() { + @Override + public LeafReader wrap(LeafReader reader) { + return reader; + } + }) { + @Override + protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException { + return in; + } + + @Override + public CacheHelper getReaderCacheHelper() { + return directoryReader.getReaderCacheHelper(); + } + }; } public static class CustomScriptPlugin extends MockScriptPlugin { @@ -559,6 +599,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase { final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index")); final IndexShard indexShard = indexService.getShard(0); 
SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true); + int numWrapReader = numWrapInvocations.get(); assertTrue(service.canMatch(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null))); @@ -582,6 +623,13 @@ public class SearchServiceTests extends ESSingleNodeTestCase { searchRequest.source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder())); assertFalse(service.canMatch(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null))); + assertEquals(numWrapReader, numWrapInvocations.get()); + + // make sure that the wrapper is called when the context is actually created + service.createContext(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, + indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY), + 1f, -1, null, null)).close(); + assertEquals(numWrapReader+1, numWrapInvocations.get()); } public void testCanRewriteToMatchNone() { From f48981f43c34545cc072be9201cd6f6eeb9ac55c Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 14 Oct 2019 14:34:09 +0200 Subject: [PATCH 03/16] Remove redundant nested operator in builtin grok expression. (#47870) This prevents the following warning from being printed to the console: `regular expression has redundant nested repeat operator + /%\{(?(?[A-z0-9]+)(?::(?[[:alnum:]@\[\]_:.-]+))?)(?:=(?(?:(?:[^{}]+|\.+)+)+))?\}/` The current grok expression does not fail; it just causes this warning to be printed. The warning started being printed after upgrading joni (#47374). Closes #47861 --- libs/grok/src/main/java/org/elasticsearch/grok/Grok.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java index 07f75fd995b..a7a9a4a6f55 100644 --- a/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/elasticsearch/grok/Grok.java @@ -55,9 +55,7 @@ public final class Grok { "(?::(?[[:alnum:]@\\[\\]_:.-]+))?" + ")" + "(?:=(?" + - "(?:" + "(?:[^{}]+|\\.+)+" + - ")+" + ")" + ")?"
+ "\\}"; private static final Regex GROK_PATTERN_REGEX = new Regex(GROK_PATTERN.getBytes(StandardCharsets.UTF_8), 0, From c2a3e834279b56174d09c15906a70f3cbf2d1fca Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 14 Oct 2019 16:19:34 +0200 Subject: [PATCH 04/16] Remove unused transport action from TransportFreezeIndexAction (#47992) Removes unnecessary TransportCloseIndexAction from TransportFreezeIndexAction --- .../xpack/frozen/action/TransportFreezeIndexAction.java | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java index 46b68c3748a..0364bb52278 100644 --- a/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java +++ b/x-pack/plugin/frozen-indices/src/main/java/org/elasticsearch/xpack/frozen/action/TransportFreezeIndexAction.java @@ -10,7 +10,6 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; -import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DestructiveOperations; @@ -50,20 +49,17 @@ public final class TransportFreezeIndexAction extends private final DestructiveOperations destructiveOperations; private final MetaDataIndexStateService indexStateService; - private final TransportCloseIndexAction transportCloseIndexAction; @Inject public TransportFreezeIndexAction(MetaDataIndexStateService indexStateService, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - DestructiveOperations destructiveOperations, - TransportCloseIndexAction transportCloseIndexAction) { + DestructiveOperations destructiveOperations) { super(FreezeIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, FreezeRequest::new, indexNameExpressionResolver); this.destructiveOperations = destructiveOperations; this.indexStateService = indexStateService; - this.transportCloseIndexAction = transportCloseIndexAction; } @Override protected String executor() { From 2b1372adfd86998512669304e35fdf7ad76e9351 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 14 Oct 2019 17:55:46 +0300 Subject: [PATCH 05/16] File based role mappings vs the role mapping APIs (#47015) (#47978) Make clear in the docs that the role mapping APIs is the preferred way to manage role mappings and that the role mappings that are defined in files cannot be viewed or managed with the APIs --- .../security/create-role-mappings.asciidoc | 8 ++++++-- .../security/delete-role-mappings.asciidoc | 7 ++++++- .../security/get-role-mappings.asciidoc | 7 ++++++- .../authorization/mapping-roles.asciidoc | 18 ++++++++++++++++++ 4 files changed, 36 insertions(+), 4 deletions(-) diff --git a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc index 735e1d474e0..c325406c8ee 100644 --- 
a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc @@ -24,8 +24,12 @@ Creates and updates role mappings. ==== {api-description-title} Role mappings define which roles are assigned to each user. Each mapping has -_rules_ that identify users and a list of _roles_ that are -granted to those users. +_rules_ that identify users and a list of _roles_ that are granted to those users. + +The role mapping APIs are generally the preferred way to manage role mappings +rather than using {stack-ov}/mapping-roles.html#mapping-roles-file[role mapping files]. +The create or update role mappings API cannot update role mappings that are defined +in role mapping files. NOTE: This API does not create roles. Rather, it maps users to existing roles. Roles can be created by using <> or diff --git a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc index 489aa944805..c5dd1aa9c90 100644 --- a/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/delete-role-mappings.asciidoc @@ -21,7 +21,12 @@ Removes role mappings. ==== {api-description-title} Role mappings define which roles are assigned to each user. For more information, -see <>. +see <>. + +The role mapping APIs are generally the preferred way to manage role mappings +rather than using <>. +The delete role mappings API cannot remove role mappings that are defined +in role mapping files. [[security-api-delete-role-mapping-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc index 5243d775250..6fa79fe8a03 100644 --- a/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-role-mappings.asciidoc @@ -23,7 +23,12 @@ Retrieves role mappings. ==== {api-description-title} Role mappings define which roles are assigned to each user. For more information, -see <>. +see <>. + +The role mapping APIs are generally the preferred way to manage role mappings +rather than using <>. +The get role mappings API cannot retrieve role mappings that are defined +in role mapping files. [[security-api-get-role-mapping-path-params]] ==== {api-path-parms-title} diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index c202508caa3..012c8fc4ed9 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -66,6 +66,24 @@ You can change this default behavior by changing the this is a common setting in Elasticsearch, changing its value might affect other schedules in the system. +While the _role mapping APIs_ are the preferred way to manage role mappings, using +the `role_mappings.yml` file becomes useful in a couple of use cases: + +. If you want to define fixed role mappings that no one (besides an administrator +with physical access to the {es} nodes) would be able to change. + +. If cluster administration depends on users from external realms and these users +need to have their roles mapped to them even when the cluster is RED. For instance, +an administrator who authenticates via LDAP or PKI and gets assigned an +administrator role so that they can perform corrective actions.
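To make the file-based option concrete, here is a minimal sketch of a `role_mappings.yml` of the kind described above; the role names and distinguished names are hypothetical:

    # hypothetical entries: each key is a role name, mapped to the DNs
    # of the users or groups that should receive that role
    superuser:
      - "cn=admins,dc=example,dc=com"
    monitoring_user:
      - "cn=John Doe,cn=contractors,dc=example,dc=com"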
+ +Please note, however, that the role_mappings.yml file is provided +as a minimal administrative function and is not intended to cover +all use cases for defining roles. + +IMPORTANT: You cannot view, edit, or remove any roles that are defined in the role +mapping files by using the role mapping APIs. + ==== Realm specific details [float] [[ldap-role-mapping]] From 508db4589bc2497b144f89afa47b0b77278e713e Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Mon, 14 Oct 2019 11:17:11 -0400 Subject: [PATCH 06/16] [ML][Transforms] signal listener early on stop failure (#47954) (#48002) --- .../action/TransportStopTransformAction.java | 49 +++++++++++++ .../TransportStopTransformActionTests.java | 68 +++++++++++++++++++ 2 files changed, 117 insertions(+) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java index 721a0928b0b..52bbc74917c 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java @@ -46,6 +46,8 @@ import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.xpack.core.transform.TransformMessages.CANNOT_STOP_FAILED_TRANSFORM; @@ -196,6 +198,13 @@ public class TransportStopTransformAction extends TransportTasksAction { + // If there were failures attempting to stop the tasks, we don't know if they will actually stop. + // It is better to respond to the user now than to leave the persistent task waiting until it times out + if (response.getTaskFailures().isEmpty() == false || response.getNodeFailures().isEmpty() == false) { + RestStatus status = firstNotOKStatus(response.getTaskFailures(), response.getNodeFailures()); + listener.onFailure(buildException(response.getTaskFailures(), response.getNodeFailures(), status)); + return; + } // Wait until the persistent task is stopped // Switch over to Generic threadpool so we don't block the network thread threadPool.generic().execute(() -> @@ -205,6 +214,46 @@ public class TransportStopTransformAction extends TransportTasksAction + static ElasticsearchStatusException buildException(List taskOperationFailures, + List elasticsearchExceptions, + RestStatus status) { + List exceptions = Stream.concat( + taskOperationFailures.stream().map(TaskOperationFailure::getCause), + elasticsearchExceptions.stream()).collect(Collectors.toList()); + + ElasticsearchStatusException elasticsearchStatusException = + new ElasticsearchStatusException(exceptions.get(0).getMessage(), status); + + for (int i = 1; i < exceptions.size(); i++) { + elasticsearchStatusException.addSuppressed(exceptions.get(i)); + } + return elasticsearchStatusException; + } + + static RestStatus firstNotOKStatus(List taskOperationFailures, List exceptions) { + RestStatus status = RestStatus.OK; + + for (TaskOperationFailure taskOperationFailure : taskOperationFailures) { + status = taskOperationFailure.getStatus(); + if (RestStatus.OK.equals(status) == false) { + break; + } + } + if (status == RestStatus.OK) { + for (ElasticsearchException exception : exceptions) { + // As it stands right now, this will ALWAYS be INTERNAL_SERVER_ERROR.
+ // FailedNodeException does not overwrite the `status()` method and the logic in ElasticsearchException + // Just returns an INTERNAL_SERVER_ERROR + status = exception.status(); + if (RestStatus.OK.equals(status) == false) { + break; + } + } + } + // If all the previous exceptions don't have a valid status, we have an unknown error. + return status == RestStatus.OK ? RestStatus.INTERNAL_SERVER_ERROR : status; + } + private void waitForTransformStopped(Set persistentTaskIds, TimeValue timeout, boolean force, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java index 9fcc44d7389..0ca86c3657f 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java @@ -5,12 +5,15 @@ */ package org.elasticsearch.xpack.transform.action; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; +import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.transform.TransformMessages; @@ -18,8 +21,10 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams; import org.elasticsearch.xpack.core.transform.transforms.TransformState; import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.List; import static org.elasticsearch.rest.RestStatus.CONFLICT; import static org.hamcrest.Matchers.equalTo; @@ -91,4 +96,67 @@ public class TransportStopTransformActionTests extends ESTestCase { "task has failed"))); } + public void testFirstNotOKStatus() { + List nodeFailures = new ArrayList<>(); + List taskOperationFailures = new ArrayList<>(); + + nodeFailures.add(new ElasticsearchException("nodefailure", + new ElasticsearchStatusException("failure", RestStatus.UNPROCESSABLE_ENTITY))); + taskOperationFailures.add(new TaskOperationFailure("node", + 1, + new ElasticsearchStatusException("failure", RestStatus.BAD_REQUEST))); + + assertThat(TransportStopTransformAction.firstNotOKStatus(Collections.emptyList(), Collections.emptyList()), + equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + + assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, Collections.emptyList()), + equalTo(RestStatus.BAD_REQUEST)); + assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, nodeFailures), + equalTo(RestStatus.BAD_REQUEST)); + assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, + Collections.singletonList(new ElasticsearchException(new ElasticsearchStatusException("not failure", RestStatus.OK)))), + equalTo(RestStatus.BAD_REQUEST)); + + assertThat(TransportStopTransformAction.firstNotOKStatus( + Collections.singletonList(new TaskOperationFailure( + "node", + 1, + new 
ElasticsearchStatusException("not failure", RestStatus.OK))), + nodeFailures), + equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + + assertThat(TransportStopTransformAction.firstNotOKStatus( + Collections.emptyList(), + nodeFailures), + equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + } + + public void testBuildException() { + List nodeFailures = new ArrayList<>(); + List taskOperationFailures = new ArrayList<>(); + + nodeFailures.add(new ElasticsearchException("node failure")); + taskOperationFailures.add(new TaskOperationFailure("node", + 1, + new ElasticsearchStatusException("task failure", RestStatus.BAD_REQUEST))); + + RestStatus status = CONFLICT; + ElasticsearchStatusException statusException = + TransportStopTransformAction.buildException(taskOperationFailures, nodeFailures, status); + + assertThat(statusException.status(), equalTo(status)); + assertThat(statusException.getMessage(), equalTo(taskOperationFailures.get(0).getCause().getMessage())); + assertThat(statusException.getSuppressed().length, equalTo(1)); + + statusException = TransportStopTransformAction.buildException(Collections.emptyList(), nodeFailures, status); + assertThat(statusException.status(), equalTo(status)); + assertThat(statusException.getMessage(), equalTo(nodeFailures.get(0).getMessage())); + assertThat(statusException.getSuppressed().length, equalTo(0)); + + statusException = TransportStopTransformAction.buildException(taskOperationFailures, Collections.emptyList(), status); + assertThat(statusException.status(), equalTo(status)); + assertThat(statusException.getMessage(), equalTo(taskOperationFailures.get(0).getCause().getMessage())); + assertThat(statusException.getSuppressed().length, equalTo(0)); + } + } From 5f3ef2e09ca31561b6beb6773d8d3cf3dbf96054 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 14 Oct 2019 11:38:46 -0400 Subject: [PATCH 07/16] [DOCS] Remove unsupported `local` and `master_timeout` parms from cat API docs (#47933) --- docs/reference/cat/alias.asciidoc | 2 -- docs/reference/cat/count.asciidoc | 4 ---- docs/reference/cat/fielddata.asciidoc | 4 ---- docs/reference/cat/health.asciidoc | 4 ---- docs/reference/cat/recovery.asciidoc | 4 ---- docs/reference/cat/snapshots.asciidoc | 2 -- docs/reference/cat/tasks.asciidoc | 2 -- .../src/main/resources/rest-api-spec/api/cat.aliases.json | 4 ---- .../src/main/resources/rest-api-spec/api/cat.count.json | 8 -------- .../main/resources/rest-api-spec/api/cat.fielddata.json | 8 -------- .../src/main/resources/rest-api-spec/api/cat.health.json | 8 -------- .../main/resources/rest-api-spec/api/cat.recovery.json | 4 ---- 12 files changed, 54 deletions(-) diff --git a/docs/reference/cat/alias.asciidoc b/docs/reference/cat/alias.asciidoc index f795b38e686..7259c8d7d0d 100644 --- a/docs/reference/cat/alias.asciidoc +++ b/docs/reference/cat/alias.asciidoc @@ -34,8 +34,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help] include::{docdir}/rest-api/common-parms.asciidoc[tag=local] -include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] - include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s] include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] diff --git a/docs/reference/cat/count.asciidoc b/docs/reference/cat/count.asciidoc index 3b126545064..7ae127885f5 100644 --- a/docs/reference/cat/count.asciidoc +++ b/docs/reference/cat/count.asciidoc @@ -34,10 +34,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h] include::{docdir}/rest-api/common-parms.asciidoc[tag=help] 
-include::{docdir}/rest-api/common-parms.asciidoc[tag=local] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] - include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s] include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v] diff --git a/docs/reference/cat/fielddata.asciidoc b/docs/reference/cat/fielddata.asciidoc index 3882848f703..27a21cc9fba 100644 --- a/docs/reference/cat/fielddata.asciidoc +++ b/docs/reference/cat/fielddata.asciidoc @@ -31,10 +31,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=bytes] include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format] -include::{docdir}/rest-api/common-parms.asciidoc[tag=local] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] - include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h] include::{docdir}/rest-api/common-parms.asciidoc[tag=help] diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index 667df47963a..f9a9003ac22 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -45,10 +45,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h] include::{docdir}/rest-api/common-parms.asciidoc[tag=help] -include::{docdir}/rest-api/common-parms.asciidoc[tag=local] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] - include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s] include::{docdir}/rest-api/common-parms.asciidoc[tag=time] diff --git a/docs/reference/cat/recovery.asciidoc b/docs/reference/cat/recovery.asciidoc index fedb77a1099..4848adbe09d 100644 --- a/docs/reference/cat/recovery.asciidoc +++ b/docs/reference/cat/recovery.asciidoc @@ -50,10 +50,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help] include::{docdir}/rest-api/common-parms.asciidoc[tag=index-query-parm] -include::{docdir}/rest-api/common-parms.asciidoc[tag=local] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout] - include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s] include::{docdir}/rest-api/common-parms.asciidoc[tag=time] diff --git a/docs/reference/cat/snapshots.asciidoc b/docs/reference/cat/snapshots.asciidoc index b5bf30edb3f..59a23fae00f 100644 --- a/docs/reference/cat/snapshots.asciidoc +++ b/docs/reference/cat/snapshots.asciidoc @@ -95,8 +95,6 @@ Reason for any snapshot failures. include::{docdir}/rest-api/common-parms.asciidoc[tag=help] -include::{docdir}/rest-api/common-parms.asciidoc[tag=local] - `ignore_unavailable`:: (Optional, boolean) If `true`, the response does not include information from unavailable snapshots. Defaults to `false`. 
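For reference, the parameters that remain in these docs and specs can still be combined as usual; for example, a verbose JSON-formatted health check, using only parameters the cat APIs are known to keep (console syntax):

    GET _cat/health?v&format=json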
diff --git a/docs/reference/cat/tasks.asciidoc b/docs/reference/cat/tasks.asciidoc index af208ef20cb..cc30486859c 100644 --- a/docs/reference/cat/tasks.asciidoc +++ b/docs/reference/cat/tasks.asciidoc @@ -51,8 +51,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help] include::{docdir}/rest-api/common-parms.asciidoc[tag=node-id-query-parm] -include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] - include::{docdir}/rest-api/common-parms.asciidoc[tag=parent-task-id] include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json index 0f49d3a8b74..d37273cfd41 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.aliases.json @@ -36,10 +36,6 @@ "type":"boolean", "description":"Return local information, do not retrieve the state from master node (default: false)" }, - "master_timeout":{ - "type":"time", - "description":"Explicit operation timeout for connection to master node" - }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json index 807e108e788..8cfaddf8db8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.count.json @@ -32,14 +32,6 @@ "type":"string", "description":"a short version of the Accept header, e.g. json, yaml" }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - }, - "master_timeout":{ - "type":"time", - "description":"Explicit operation timeout for connection to master node" - }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json index a3d05537f81..9fbde4736b5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.fielddata.json @@ -49,14 +49,6 @@ "pb" ] }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - }, - "master_timeout":{ - "type":"time", - "description":"Explicit operation timeout for connection to master node" - }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json index 50e2cf8c72a..7e79b7cc2c9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.health.json @@ -20,14 +20,6 @@ "type":"string", "description":"a short version of the Accept header, e.g. 
json, yaml" }, - "local":{ - "type":"boolean", - "description":"Return local information, do not retrieve the state from master node (default: false)" - }, - "master_timeout":{ - "type":"time", - "description":"Explicit operation timeout for connection to master node" - }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json index c24e8854a72..64c102d0897 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cat.recovery.json @@ -59,10 +59,6 @@ "description":"If `true`, the response includes detailed information about shard recoveries", "default":false }, - "master_timeout":{ - "type":"time", - "description":"Explicit operation timeout for connection to master node" - }, "h":{ "type":"list", "description":"Comma-separated list of column names to display" From 1ca25bed385e16fcfa22406ecab00f794d776773 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Mon, 14 Oct 2019 17:19:13 +0100 Subject: [PATCH 08/16] [ML][7.x] Add option to stop datafeed that finds no data (#47995) Adds a new datafeed config option, max_empty_searches, that tells a datafeed that has never found any data to stop itself and close its associated job after a certain number of real-time searches have returned no data. Backport of #47922 --- .../client/ml/datafeed/DatafeedConfig.java | 29 +++++++-- .../client/ml/datafeed/DatafeedUpdate.java | 25 ++++++-- .../ml/datafeed/DatafeedConfigTests.java | 3 + .../ml/datafeed/DatafeedUpdateTests.java | 3 + .../apis/datafeedresource.asciidoc | 9 +++ .../apis/update-datafeed.asciidoc | 9 +++ .../core/ml/datafeed/DatafeedConfig.java | 42 +++++++++++-- .../core/ml/datafeed/DatafeedUpdate.java | 45 ++++++++++++-- .../core/ml/datafeed/DatafeedConfigTests.java | 30 +++++++++- .../core/ml/datafeed/DatafeedUpdateTests.java | 12 +++- .../xpack/ml/integration/DatafeedJobsIT.java | 59 ++++++++++++++++--- .../xpack/ml/datafeed/DatafeedJob.java | 18 ++++-- .../xpack/ml/datafeed/DatafeedJobBuilder.java | 6 +- .../xpack/ml/datafeed/DatafeedManager.java | 25 ++++++-- .../xpack/ml/datafeed/ProblemTracker.java | 12 ++-- .../xpack/ml/datafeed/DatafeedJobTests.java | 29 ++++----- .../ml/datafeed/DatafeedManagerTests.java | 7 ++- .../ml/datafeed/ProblemTrackerTests.java | 4 +- .../rest-api-spec/test/ml/datafeeds_crud.yml | 12 +++- 19 files changed, 310 insertions(+), 69 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java index f56da88303d..f192b420eba 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedConfig.java @@ -62,6 +62,7 @@ public class DatafeedConfig implements ToXContentObject { public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields"); public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); public static final ParseField DELAYED_DATA_CHECK_CONFIG = new ParseField("delayed_data_check_config"); + public static final ParseField MAX_EMPTY_SEARCHES = new ParseField("max_empty_searches"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( 
"datafeed_config", true, a -> new Builder((String)a[0], (String)a[1])); @@ -88,6 +89,7 @@ public class DatafeedConfig implements ToXContentObject { PARSER.declareInt(Builder::setScrollSize, SCROLL_SIZE); PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG); PARSER.declareObject(Builder::setDelayedDataCheckConfig, DelayedDataCheckConfig.PARSER, DELAYED_DATA_CHECK_CONFIG); + PARSER.declareInt(Builder::setMaxEmptySearches, MAX_EMPTY_SEARCHES); } private static BytesReference parseBytes(XContentParser parser) throws IOException { @@ -107,11 +109,12 @@ public class DatafeedConfig implements ToXContentObject { private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private final DelayedDataCheckConfig delayedDataCheckConfig; - + private final Integer maxEmptySearches; private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, BytesReference query, BytesReference aggregations, List scriptFields, Integer scrollSize, - ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { + ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig, + Integer maxEmptySearches) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -123,6 +126,7 @@ public class DatafeedConfig implements ToXContentObject { this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; this.delayedDataCheckConfig = delayedDataCheckConfig; + this.maxEmptySearches = maxEmptySearches; } public String getId() { @@ -169,6 +173,10 @@ public class DatafeedConfig implements ToXContentObject { return delayedDataCheckConfig; } + public Integer getMaxEmptySearches() { + return maxEmptySearches; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -205,6 +213,9 @@ public class DatafeedConfig implements ToXContentObject { if (delayedDataCheckConfig != null) { builder.field(DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig); } + if (maxEmptySearches != null) { + builder.field(MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches); + } builder.endObject(); return builder; @@ -245,7 +256,8 @@ public class DatafeedConfig implements ToXContentObject { && Objects.equals(asMap(this.aggregations), asMap(that.aggregations)) && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig) - && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig); + && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig) + && Objects.equals(this.maxEmptySearches, that.maxEmptySearches); } /** @@ -256,7 +268,7 @@ public class DatafeedConfig implements ToXContentObject { @Override public int hashCode() { return Objects.hash(id, jobId, frequency, queryDelay, indices, asMap(query), scrollSize, asMap(aggregations), scriptFields, - chunkingConfig, delayedDataCheckConfig); + chunkingConfig, delayedDataCheckConfig, maxEmptySearches); } public static Builder builder(String id, String jobId) { @@ -276,6 +288,7 @@ public class DatafeedConfig implements ToXContentObject { private Integer scrollSize; private ChunkingConfig chunkingConfig; private DelayedDataCheckConfig delayedDataCheckConfig; + private Integer maxEmptySearches; public Builder(String id, String jobId) { this.id = Objects.requireNonNull(id, ID.getPreferredName()); @@ -294,6 +307,7 @@ public class DatafeedConfig implements ToXContentObject { this.scrollSize = 
config.scrollSize; this.chunkingConfig = config.chunkingConfig; this.delayedDataCheckConfig = config.getDelayedDataCheckConfig(); + this.maxEmptySearches = config.getMaxEmptySearches(); } public Builder setIndices(List indices) { @@ -376,9 +390,14 @@ public class DatafeedConfig implements ToXContentObject { return this; } + public Builder setMaxEmptySearches(int maxEmptySearches) { + this.maxEmptySearches = maxEmptySearches; + return this; + } + public DatafeedConfig build() { return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, query, aggregations, scriptFields, scrollSize, - chunkingConfig, delayedDataCheckConfig); + chunkingConfig, delayedDataCheckConfig, maxEmptySearches); } private static BytesReference xContentToBytes(ToXContentObject object) throws IOException { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java index 96c4d9b2b9f..e11e2e1d9b3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/datafeed/DatafeedUpdate.java @@ -79,6 +79,7 @@ public class DatafeedUpdate implements ToXContentObject { PARSER.declareObject(Builder::setDelayedDataCheckConfig, DelayedDataCheckConfig.PARSER, DatafeedConfig.DELAYED_DATA_CHECK_CONFIG); + PARSER.declareInt(Builder::setMaxEmptySearches, DatafeedConfig.MAX_EMPTY_SEARCHES); } private static BytesReference parseBytes(XContentParser parser) throws IOException { @@ -98,10 +99,12 @@ public class DatafeedUpdate implements ToXContentObject { private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private final DelayedDataCheckConfig delayedDataCheckConfig; + private final Integer maxEmptySearches; private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, BytesReference query, BytesReference aggregations, List scriptFields, Integer scrollSize, - ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { + ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig, + Integer maxEmptySearches) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -113,6 +116,7 @@ public class DatafeedUpdate implements ToXContentObject { this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; this.delayedDataCheckConfig = delayedDataCheckConfig; + this.maxEmptySearches = maxEmptySearches; } /** @@ -152,6 +156,7 @@ public class DatafeedUpdate implements ToXContentObject { } addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize); addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig); + addOptionalField(builder, DatafeedConfig.MAX_EMPTY_SEARCHES, maxEmptySearches); builder.endObject(); return builder; } @@ -202,6 +207,10 @@ public class DatafeedUpdate implements ToXContentObject { return delayedDataCheckConfig; } + public Integer getMaxEmptySearches() { + return maxEmptySearches; + } + private static Map asMap(BytesReference bytesReference) { return bytesReference == null ? 
null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2(); } @@ -237,7 +246,8 @@ public class DatafeedUpdate implements ToXContentObject { && Objects.equals(asMap(this.aggregations), asMap(that.aggregations)) && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig) && Objects.equals(this.scriptFields, that.scriptFields) - && Objects.equals(this.chunkingConfig, that.chunkingConfig); + && Objects.equals(this.chunkingConfig, that.chunkingConfig) + && Objects.equals(this.maxEmptySearches, that.maxEmptySearches); } /** @@ -248,7 +258,7 @@ public class DatafeedUpdate implements ToXContentObject { @Override public int hashCode() { return Objects.hash(id, jobId, frequency, queryDelay, indices, asMap(query), scrollSize, asMap(aggregations), scriptFields, - chunkingConfig, delayedDataCheckConfig); + chunkingConfig, delayedDataCheckConfig, maxEmptySearches); } public static Builder builder(String id) { @@ -268,6 +278,7 @@ public class DatafeedUpdate implements ToXContentObject { private Integer scrollSize; private ChunkingConfig chunkingConfig; private DelayedDataCheckConfig delayedDataCheckConfig; + private Integer maxEmptySearches; public Builder(String id) { this.id = Objects.requireNonNull(id, DatafeedConfig.ID.getPreferredName()); @@ -285,6 +296,7 @@ public class DatafeedUpdate implements ToXContentObject { this.scrollSize = config.scrollSize; this.chunkingConfig = config.chunkingConfig; this.delayedDataCheckConfig = config.delayedDataCheckConfig; + this.maxEmptySearches = config.maxEmptySearches; } @Deprecated @@ -364,9 +376,14 @@ public class DatafeedUpdate implements ToXContentObject { return this; } + public Builder setMaxEmptySearches(int maxEmptySearches) { + this.maxEmptySearches = maxEmptySearches; + return this; + } + public DatafeedUpdate build() { return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, query, aggregations, scriptFields, scrollSize, - chunkingConfig, delayedDataCheckConfig); + chunkingConfig, delayedDataCheckConfig, maxEmptySearches); } private static BytesReference xContentToBytes(ToXContentObject object) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java index a3b475193e4..7f7a03ab2e1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java @@ -106,6 +106,9 @@ public class DatafeedConfigTests extends AbstractXContentTestCase>. +`max_empty_searches`:: + (integer) If a real-time {dfeed} has never seen any data (including during + any initial training period) then it will automatically stop itself and + close its associated job after this many real-time searches that return no + documents. In other words, it will stop after `frequency` times + `max_empty_searches` of real-time operation. If not set + then a {dfeed} with no end time that sees no data will remain started until + it is explicitly stopped. By default this setting is not set. 
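As an illustration of how the new option composes with `frequency` (the datafeed and job names below are hypothetical), the following datafeed would stop itself and close its job after roughly 100 empty 30-second search cycles:

    PUT _ml/datafeeds/datafeed-sample
    {
      "job_id": "sample-job",
      "indices": ["server-metrics"],
      "frequency": "30s",
      "max_empty_searches": 100
    }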
+ [[ml-datafeed-chunking-config]] ==== Chunking configuration objects diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index 910bb727e97..732f23202b1 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -101,6 +101,15 @@ parallel and close one when you are satisfied with the results of the other job. (Optional, unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. +`max_empty_searches`:: + (Optional, integer) If a real-time {dfeed} has never seen any data (including + during any initial training period) then it will automatically stop itself + and close its associated job after this many real-time searches that return + no documents. In other words, it will stop after `frequency` times + `max_empty_searches` of real-time operation. If not set + then a {dfeed} with no end time that sees no data will remain started until + it is explicitly stopped. The special value `-1` unsets this setting. + For more information about these properties, see <>. diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 062b6d82f16..057c03161d8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -91,6 +91,7 @@ public class DatafeedConfig extends AbstractDiffable implements public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config"); public static final ParseField HEADERS = new ParseField("headers"); public static final ParseField DELAYED_DATA_CHECK_CONFIG = new ParseField("delayed_data_check_config"); + public static final ParseField MAX_EMPTY_SEARCHES = new ParseField("max_empty_searches"); // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly public static final ObjectParser LENIENT_PARSER = createParser(true); @@ -152,6 +153,7 @@ public class DatafeedConfig extends AbstractDiffable implements parser.declareObject(Builder::setDelayedDataCheckConfig, ignoreUnknownFields ? 
DelayedDataCheckConfig.LENIENT_PARSER : DelayedDataCheckConfig.STRICT_PARSER, DELAYED_DATA_CHECK_CONFIG); + parser.declareInt(Builder::setMaxEmptySearches, MAX_EMPTY_SEARCHES); return parser; } @@ -176,11 +178,12 @@ public class DatafeedConfig extends AbstractDiffable implements private final ChunkingConfig chunkingConfig; private final Map headers; private final DelayedDataCheckConfig delayedDataCheckConfig; + private final Integer maxEmptySearches; private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, QueryProvider queryProvider, AggProvider aggProvider, List scriptFields, Integer scrollSize, ChunkingConfig chunkingConfig, Map headers, - DelayedDataCheckConfig delayedDataCheckConfig) { + DelayedDataCheckConfig delayedDataCheckConfig, Integer maxEmptySearches) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -193,6 +196,7 @@ public class DatafeedConfig extends AbstractDiffable implements this.chunkingConfig = chunkingConfig; this.headers = Collections.unmodifiableMap(headers); this.delayedDataCheckConfig = delayedDataCheckConfig; + this.maxEmptySearches = maxEmptySearches; } public DatafeedConfig(StreamInput in) throws IOException { @@ -233,6 +237,11 @@ public class DatafeedConfig extends AbstractDiffable implements } else { delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); } + if (in.getVersion().onOrAfter(Version.V_7_5_0)) { + maxEmptySearches = in.readOptionalVInt(); + } else { + maxEmptySearches = null; + } } /** @@ -401,6 +410,10 @@ public class DatafeedConfig extends AbstractDiffable implements return delayedDataCheckConfig; } + public Integer getMaxEmptySearches() { + return maxEmptySearches; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); @@ -439,6 +452,9 @@ public class DatafeedConfig extends AbstractDiffable implements if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeOptionalWriteable(delayedDataCheckConfig); } + if (out.getVersion().onOrAfter(Version.V_7_5_0)) { + out.writeOptionalVInt(maxEmptySearches); + } } @Override @@ -475,6 +491,9 @@ public class DatafeedConfig extends AbstractDiffable implements if (delayedDataCheckConfig != null) { builder.field(DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig); } + if (maxEmptySearches != null) { + builder.field(MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches); + } builder.endObject(); return builder; } @@ -507,13 +526,14 @@ public class DatafeedConfig extends AbstractDiffable implements && Objects.equals(this.scriptFields, that.scriptFields) && Objects.equals(this.chunkingConfig, that.chunkingConfig) && Objects.equals(this.headers, that.headers) - && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig); + && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig) + && Objects.equals(this.maxEmptySearches, that.maxEmptySearches); } @Override public int hashCode() { return Objects.hash(id, jobId, frequency, queryDelay, indices, queryProvider, scrollSize, aggProvider, scriptFields, chunkingConfig, - headers, delayedDataCheckConfig); + headers, delayedDataCheckConfig, maxEmptySearches); } @Override @@ -586,6 +606,7 @@ public class DatafeedConfig extends AbstractDiffable implements private ChunkingConfig chunkingConfig; private Map headers = Collections.emptyMap(); private DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig(); + private Integer maxEmptySearches; public 
Builder() { } @@ -608,6 +629,7 @@ public class DatafeedConfig extends AbstractDiffable implements this.chunkingConfig = config.chunkingConfig; this.headers = new HashMap<>(config.headers); this.delayedDataCheckConfig = config.getDelayedDataCheckConfig(); + this.maxEmptySearches = config.getMaxEmptySearches(); } public void setId(String datafeedId) { @@ -701,6 +723,18 @@ public class DatafeedConfig extends AbstractDiffable implements this.delayedDataCheckConfig = delayedDataCheckConfig; } + public void setMaxEmptySearches(int maxEmptySearches) { + if (maxEmptySearches == -1) { + this.maxEmptySearches = null; + } else if (maxEmptySearches <= 0) { + String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE, + DatafeedConfig.MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches); + throw ExceptionsHelper.badRequestException(msg); + } else { + this.maxEmptySearches = maxEmptySearches; + } + } + public DatafeedConfig build() { ExceptionsHelper.requireNonNull(id, ID.getPreferredName()); ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName()); @@ -716,7 +750,7 @@ public class DatafeedConfig extends AbstractDiffable implements setDefaultQueryDelay(); return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, queryProvider, aggProvider, scriptFields, scrollSize, - chunkingConfig, headers, delayedDataCheckConfig); + chunkingConfig, headers, delayedDataCheckConfig, maxEmptySearches); } void validateScriptFields() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 1c7d6e46b0a..2b4dacc5834 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -81,6 +81,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { PARSER.declareObject(Builder::setDelayedDataCheckConfig, DelayedDataCheckConfig.STRICT_PARSER, DatafeedConfig.DELAYED_DATA_CHECK_CONFIG); + PARSER.declareInt(Builder::setMaxEmptySearches, DatafeedConfig.MAX_EMPTY_SEARCHES); } private final String id; @@ -94,11 +95,13 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { private final Integer scrollSize; private final ChunkingConfig chunkingConfig; private final DelayedDataCheckConfig delayedDataCheckConfig; + private final Integer maxEmptySearches; private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List indices, QueryProvider queryProvider, AggProvider aggProvider, List scriptFields, - Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) { + Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig, + Integer maxEmptySearches) { this.id = id; this.jobId = jobId; this.queryDelay = queryDelay; @@ -110,6 +113,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { this.scrollSize = scrollSize; this.chunkingConfig = chunkingConfig; this.delayedDataCheckConfig = delayedDataCheckConfig; + this.maxEmptySearches = maxEmptySearches; } public DatafeedUpdate(StreamInput in) throws IOException { @@ -147,6 +151,11 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { } else { delayedDataCheckConfig = null; } + if (in.getVersion().onOrAfter(Version.V_7_5_0)) { + maxEmptySearches = in.readOptionalInt(); + } else { + 
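// the field is only serialized from 7.5.0 onwards, so streams from older nodes leave it unset +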
maxEmptySearches = null; + } } /** @@ -192,6 +201,9 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { if (out.getVersion().onOrAfter(Version.V_6_6_0)) { out.writeOptionalWriteable(delayedDataCheckConfig); } + if (out.getVersion().onOrAfter(Version.V_7_5_0)) { + out.writeOptionalInt(maxEmptySearches); + } } @Override @@ -222,6 +234,8 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize); addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig); addOptionalField(builder, DatafeedConfig.DELAYED_DATA_CHECK_CONFIG, delayedDataCheckConfig); + addOptionalField(builder, DatafeedConfig.MAX_EMPTY_SEARCHES, maxEmptySearches); + builder.endObject(); return builder; } @@ -290,6 +304,10 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { return delayedDataCheckConfig; } + public Integer getMaxEmptySearches() { + return maxEmptySearches; + } + /** * Applies the update to the given {@link DatafeedConfig} * @return a new {@link DatafeedConfig} that contains the update @@ -334,6 +352,9 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { if (delayedDataCheckConfig != null) { builder.setDelayedDataCheckConfig(delayedDataCheckConfig); } + if (maxEmptySearches != null) { + builder.setMaxEmptySearches(maxEmptySearches); + } if (headers.isEmpty() == false) { // Adjust the request, adding security headers from the current thread context @@ -373,13 +394,14 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { && Objects.equals(this.aggProvider, that.aggProvider) && Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig) && Objects.equals(this.scriptFields, that.scriptFields) - && Objects.equals(this.chunkingConfig, that.chunkingConfig); + && Objects.equals(this.chunkingConfig, that.chunkingConfig) + && Objects.equals(this.maxEmptySearches, that.maxEmptySearches); } @Override public int hashCode() { return Objects.hash(id, jobId, frequency, queryDelay, indices, queryProvider, scrollSize, aggProvider, scriptFields, chunkingConfig, - delayedDataCheckConfig); + delayedDataCheckConfig, maxEmptySearches); } @Override @@ -396,7 +418,8 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { && (aggProvider == null || Objects.equals(aggProvider.getAggs(), datafeed.getAggregations())) && (scriptFields == null || Objects.equals(scriptFields, datafeed.getScriptFields())) && (delayedDataCheckConfig == null || Objects.equals(delayedDataCheckConfig, datafeed.getDelayedDataCheckConfig())) - && (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig())); + && (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig())) + && (maxEmptySearches == null || Objects.equals(maxEmptySearches, datafeed.getMaxEmptySearches())); } public static class Builder { @@ -412,6 +435,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { private Integer scrollSize; private ChunkingConfig chunkingConfig; private DelayedDataCheckConfig delayedDataCheckConfig; + private Integer maxEmptySearches; public Builder() { } @@ -432,6 +456,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject { this.scrollSize = config.scrollSize; this.chunkingConfig = config.chunkingConfig; this.delayedDataCheckConfig = config.delayedDataCheckConfig; + this.maxEmptySearches = config.maxEmptySearches; } public Builder setId(String datafeedId) { @@ -499,9 +524,19 @@ public class 
DatafeedUpdate implements Writeable, ToXContentObject { return this; } + public Builder setMaxEmptySearches(int maxEmptySearches) { + if (maxEmptySearches < -1 || maxEmptySearches == 0) { + String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE, + DatafeedConfig.MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches); + throw ExceptionsHelper.badRequestException(msg); + } + this.maxEmptySearches = maxEmptySearches; + return this; + } + public DatafeedUpdate build() { return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, queryProvider, aggProvider, scriptFields, scrollSize, - chunkingConfig, delayedDataCheckConfig); + chunkingConfig, delayedDataCheckConfig, maxEmptySearches); } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 7afcc9799f7..00d39ec562c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.datafeed; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -68,6 +69,7 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; public class DatafeedConfigTests extends AbstractSerializingTestCase { @@ -149,6 +151,9 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase conf.setIndices(null)); } + public void testCheckValid_GivenInvalidMaxEmptySearches() { + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + ElasticsearchStatusException e = + expectThrows(ElasticsearchStatusException.class, () -> conf.setMaxEmptySearches(randomFrom(-2, 0))); + assertThat(e.getMessage(), containsString("Invalid max_empty_searches value")); + } + + public void testCheckValid_GivenMaxEmptySearchesMinusOne() { + DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); + conf.setIndices(Collections.singletonList("whatever")); + conf.setMaxEmptySearches(-1); + assertThat(conf.build().getMaxEmptySearches(), is(nullValue())); + } + public void testCheckValid_GivenEmptyIndices() { DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1"); conf.setIndices(Collections.emptyList()); @@ -824,7 +843,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase { + GetDatafeedsStatsAction.Request request = new GetDatafeedsStatsAction.Request(datafeedId); + GetDatafeedsStatsAction.Response response = client().execute(GetDatafeedsStatsAction.INSTANCE, request).actionGet(); + assertThat(response.getResponse().results().get(0).getDatafeedState(), equalTo(DatafeedState.STOPPED)); + }); + + // ...and should have auto-closed the job too + assertBusy(() -> { + GetJobsStatsAction.Request request = new GetJobsStatsAction.Request(jobId); + GetJobsStatsAction.Response response = client().execute(GetJobsStatsAction.INSTANCE, request).actionGet(); + 
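// a datafeed stopped by max_empty_searches behaves like a lookback-only stop, so the job is auto-closed +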
assertThat(response.getResponse().results().get(0).getState(), equalTo(JobState.CLOSED)); + }); + } + public void testRealtime_multipleStopCalls() throws Exception { String jobId = "realtime-job-multiple-stop"; final String datafeedId = jobId + "-datafeed"; @@ -402,13 +423,22 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase { } private void startRealtime(String jobId) throws Exception { + startRealtime(jobId, null); + } + + private void startRealtime(String jobId, Integer maxEmptySearches) throws Exception { client().admin().indices().prepareCreate("data") .addMapping("type", "time", "type=date") .get(); - long numDocs1 = randomIntBetween(32, 2048); long now = System.currentTimeMillis(); - long lastWeek = now - 604800000; - indexDocs(logger, "data", numDocs1, lastWeek, now); + long numDocs1; + if (maxEmptySearches == null) { + numDocs1 = randomIntBetween(32, 2048); + long lastWeek = now - 604800000; + indexDocs(logger, "data", numDocs1, lastWeek, now); + } else { + numDocs1 = 0; + } Job.Builder job = createScheduledJob(jobId); registerJob(job); @@ -416,7 +446,12 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase { openJob(job.getId()); assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED)); - DatafeedConfig datafeedConfig = createDatafeed(job.getId() + "-datafeed", job.getId(), Collections.singletonList("data")); + DatafeedConfig.Builder datafeedConfigBuilder = + createDatafeedBuilder(job.getId() + "-datafeed", job.getId(), Collections.singletonList("data")); + if (maxEmptySearches != null) { + datafeedConfigBuilder.setMaxEmptySearches(maxEmptySearches); + } + DatafeedConfig datafeedConfig = datafeedConfigBuilder.build(); registerDatafeed(datafeedConfig); putDatafeed(datafeedConfig); startDatafeed(datafeedConfig.getId(), 0L, null); @@ -426,9 +461,15 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase { assertThat(dataCounts.getOutOfOrderTimeStampCount(), equalTo(0L)); }); - long numDocs2 = randomIntBetween(2, 64); now = System.currentTimeMillis(); - indexDocs(logger, "data", numDocs2, now + 5000, now + 6000); + long numDocs2; + if (maxEmptySearches == null) { + numDocs2 = randomIntBetween(2, 64); + indexDocs(logger, "data", numDocs2, now + 5000, now + 6000); + } else { + numDocs2 = 0; + } + assertBusy(() -> { DataCounts dataCounts = getDataCounts(job.getId()); assertThat(dataCounts.getProcessedRecordCount(), equalTo(numDocs1 + numDocs2)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 257a1947528..22ebff57a4b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -64,6 +64,7 @@ class DatafeedJob { private final DatafeedTimingStatsReporter timingStatsReporter; private final Supplier currentTimeSupplier; private final DelayedDataDetector delayedDataDetector; + private final Integer maxEmptySearches; private volatile long lookbackStartTimeMs; private volatile long latestFinalBucketEndTimeMs; @@ -73,11 +74,12 @@ class DatafeedJob { private volatile Long lastEndTimeMs; private AtomicBoolean running = new AtomicBoolean(true); private volatile boolean isIsolated; + private volatile boolean haveEverSeenData; DatafeedJob(String jobId, DataDescription dataDescription, long frequencyMs, long queryDelayMs, DataExtractorFactory 
dataExtractorFactory, DatafeedTimingStatsReporter timingStatsReporter, Client client, AnomalyDetectionAuditor auditor, Supplier currentTimeSupplier, DelayedDataDetector delayedDataDetector, - long latestFinalBucketEndTimeMs, long latestRecordTimeMs) { + Integer maxEmptySearches, long latestFinalBucketEndTimeMs, long latestRecordTimeMs, boolean haveSeenDataPreviously) { this.jobId = jobId; this.dataDescription = Objects.requireNonNull(dataDescription); this.frequencyMs = frequencyMs; @@ -88,11 +90,13 @@ class DatafeedJob { this.auditor = auditor; this.currentTimeSupplier = currentTimeSupplier; this.delayedDataDetector = delayedDataDetector; + this.maxEmptySearches = maxEmptySearches; this.latestFinalBucketEndTimeMs = latestFinalBucketEndTimeMs; long lastEndTime = Math.max(latestFinalBucketEndTimeMs, latestRecordTimeMs); if (lastEndTime > 0) { lastEndTimeMs = lastEndTime; } + this.haveEverSeenData = haveSeenDataPreviously; } void isolate() { @@ -108,6 +112,10 @@ class DatafeedJob { return jobId; } + public Integer getMaxEmptySearches() { + return maxEmptySearches; + } + public void finishReportingTimingStats() { timingStatsReporter.finishReporting(); } @@ -380,6 +388,7 @@ class DatafeedJob { break; } recordCount += counts.getProcessedRecordCount(); + haveEverSeenData |= (recordCount > 0); if (counts.getLatestRecordTimeStamp() != null) { lastEndTimeMs = counts.getLatestRecordTimeStamp().getTime(); } @@ -406,7 +415,7 @@ class DatafeedJob { } if (recordCount == 0) { - throw new EmptyDataCountException(nextRealtimeTimestamp()); + throw new EmptyDataCountException(nextRealtimeTimestamp(), haveEverSeenData); } } @@ -509,10 +518,11 @@ class DatafeedJob { static class EmptyDataCountException extends RuntimeException { final long nextDelayInMsSinceEpoch; + final boolean haveEverSeenData; - EmptyDataCountException(long nextDelayInMsSinceEpoch) { + EmptyDataCountException(long nextDelayInMsSinceEpoch, boolean haveEverSeenData) { this.nextDelayInMsSinceEpoch = nextDelayInMsSinceEpoch; + this.haveEverSeenData = haveEverSeenData; } } - } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java index 1d0bd1ae04a..dc88dac0e7c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobBuilder.java @@ -92,8 +92,10 @@ public class DatafeedJobBuilder { auditor, currentTimeSupplier, delayedDataDetector, + datafeedConfigHolder.get().getMaxEmptySearches(), context.latestFinalBucketEndMs, - context.latestRecordTimeMs); + context.latestRecordTimeMs, + context.haveSeenDataPreviously); listener.onResponse(datafeedJob); }; @@ -128,6 +130,7 @@ public class DatafeedJobBuilder { if (dataCounts.getLatestRecordTimeStamp() != null) { context.latestRecordTimeMs = dataCounts.getLatestRecordTimeStamp().getTime(); } + context.haveSeenDataPreviously = (dataCounts.getInputRecordCount() > 0); jobResultsProvider.datafeedTimingStats(jobHolder.get().getId(), datafeedTimingStatsHandler, listener::onFailure); }; @@ -223,6 +226,7 @@ public class DatafeedJobBuilder { private static class Context { volatile long latestFinalBucketEndMs = -1L; volatile long latestRecordTimeMs = -1L; + volatile boolean haveSeenDataPreviously; volatile DataExtractorFactory dataExtractorFactory; volatile DatafeedTimingStatsReporter timingStatsReporter; } diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index 30a3948fcc2..2b7a40abc2e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -80,7 +80,6 @@ public class DatafeedManager { clusterService.addListener(taskRunner); } - public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer finishHandler) { String datafeedId = task.getDatafeedId(); @@ -233,7 +232,7 @@ public class DatafeedManager { long nextDelayInMsSinceEpoch; try { nextDelayInMsSinceEpoch = holder.executeRealTime(); - holder.problemTracker.reportNoneEmptyCount(); + holder.problemTracker.reportNonEmptyDataCount(); } catch (DatafeedJob.ExtractionProblemException e) { nextDelayInMsSinceEpoch = e.nextDelayInMsSinceEpoch; holder.problemTracker.reportExtractionProblem(e.getCause().getMessage()); @@ -245,8 +244,15 @@ public class DatafeedManager { return; } } catch (DatafeedJob.EmptyDataCountException e) { + int emptyDataCount = holder.problemTracker.reportEmptyDataCount(); + if (e.haveEverSeenData == false && holder.shouldStopAfterEmptyData(emptyDataCount)) { + logger.warn("Datafeed for [" + jobId + "] has seen no data in [" + emptyDataCount + + "] attempts, and never seen any data previously, so stopping..."); + // In this case we auto-close the job, as though a lookback-only datafeed stopped + holder.stop("no_data", TimeValue.timeValueSeconds(20), e, true); + return; + } nextDelayInMsSinceEpoch = e.nextDelayInMsSinceEpoch; - holder.problemTracker.reportEmptyDataCount(); } catch (Exception e) { logger.error("Unexpected datafeed failure for job [" + jobId + "] stopping...", e); holder.stop("general_realtime_error", TimeValue.timeValueSeconds(20), e); @@ -303,7 +309,7 @@ public class DatafeedManager { // To ensure that we wait until lookback / realtime search has completed before we stop the datafeed private final ReentrantLock datafeedJobLock = new ReentrantLock(true); private final DatafeedJob datafeedJob; - private final boolean autoCloseJob; + private final boolean defaultAutoCloseJob; private final ProblemTracker problemTracker; private final Consumer finishHandler; volatile Scheduler.Cancellable cancellable; @@ -315,11 +321,16 @@ public class DatafeedManager { this.allocationId = task.getAllocationId(); this.datafeedId = datafeedId; this.datafeedJob = datafeedJob; - this.autoCloseJob = task.isLookbackOnly(); + this.defaultAutoCloseJob = task.isLookbackOnly(); this.problemTracker = problemTracker; this.finishHandler = finishHandler; } + boolean shouldStopAfterEmptyData(int emptyDataCount) { + Integer emptyDataCountToStopAt = datafeedJob.getMaxEmptySearches(); + return emptyDataCountToStopAt != null && emptyDataCount >= emptyDataCountToStopAt; + } + String getJobId() { return datafeedJob.getJobId(); } @@ -333,6 +344,10 @@ public class DatafeedManager { } public void stop(String source, TimeValue timeout, Exception e) { + stop(source, timeout, e, defaultAutoCloseJob); + } + + public void stop(String source, TimeValue timeout, Exception e, boolean autoCloseJob) { if (isNodeShuttingDown) { return; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/ProblemTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/ProblemTracker.java index 69a821d4246..a8260c2eade 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/ProblemTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/ProblemTracker.java @@ -74,16 +74,14 @@ class ProblemTracker { * Updates the tracking of empty data cycles. If the number of consecutive empty data * cycles reaches {@code EMPTY_DATA_WARN_COUNT}, a warning is reported. */ - public void reportEmptyDataCount() { - if (emptyDataCount < EMPTY_DATA_WARN_COUNT) { - emptyDataCount++; - if (emptyDataCount == EMPTY_DATA_WARN_COUNT) { - auditor.warning(jobId, Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_NO_DATA)); - } + public int reportEmptyDataCount() { + if (++emptyDataCount == EMPTY_DATA_WARN_COUNT) { + auditor.warning(jobId, Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_NO_DATA)); } + return emptyDataCount; } - public void reportNoneEmptyCount() { + public void reportNonEmptyDataCount() { if (emptyDataCount >= EMPTY_DATA_WARN_COUNT) { auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_DATA_SEEN_AGAIN)); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java index b7f960cc4b8..cf3cb964661 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJobTests.java @@ -133,7 +133,7 @@ public class DatafeedJobTests extends ESTestCase { } public void testLookBackRunWithEndTime() throws Exception { - DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1); + DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean()); assertNull(datafeedJob.runLookBack(0L, 1000L)); verify(dataExtractorFactory).newExtractor(0L, 1000L); @@ -145,7 +145,7 @@ public class DatafeedJobTests extends ESTestCase { public void testSetIsolated() throws Exception { currentTime = 2000L; - DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1); + DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean()); datafeedJob.isolate(); assertNull(datafeedJob.runLookBack(0L, null)); @@ -158,7 +158,7 @@ public class DatafeedJobTests extends ESTestCase { currentTime = 2000L; long frequencyMs = 1000; long queryDelayMs = 500; - DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, -1, -1); + DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, -1, -1, randomBoolean()); long next = datafeedJob.runLookBack(0L, null); assertEquals(2000 + frequencyMs + queryDelayMs + 100, next); @@ -181,7 +181,7 @@ public class DatafeedJobTests extends ESTestCase { long frequencyMs = 1000; long queryDelayMs = 500; - DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, latestFinalBucketEndTimeMs, latestRecordTimeMs); + DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, latestFinalBucketEndTimeMs, latestRecordTimeMs, true); long next = datafeedJob.runLookBack(0L, null); assertEquals(10000 + frequencyMs + queryDelayMs + 100, next); @@ -206,7 +206,7 @@ public class DatafeedJobTests extends ESTestCase { long frequencyMs = 1000; long queryDelayMs = 500; - DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, latestFinalBucketEndTimeMs, latestRecordTimeMs); + DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, latestFinalBucketEndTimeMs, latestRecordTimeMs, true); datafeedJob.runLookBack(currentTime, null); // advance time 
@@ -238,7 +238,7 @@ public class DatafeedJobTests extends ESTestCase { currentTime = 60000L; long frequencyMs = 100; long queryDelayMs = 1000; - DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1); + DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1, false); long next = datafeedJob.runRealtime(); assertEquals(currentTime + frequencyMs + 100, next); @@ -344,7 +344,7 @@ public class DatafeedJobTests extends ESTestCase { public void testEmptyDataCountGivenlookback() throws Exception { when(dataExtractor.hasNext()).thenReturn(false); - DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1); + DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, false); expectThrows(DatafeedJob.EmptyDataCountException.class, () -> datafeedJob.runLookBack(0L, 1000L)); verify(client, times(1)).execute(same(FlushJobAction.INSTANCE), any()); verify(client, never()).execute(same(PersistJobAction.INSTANCE), any()); @@ -355,7 +355,7 @@ public class DatafeedJobTests extends ESTestCase { when(dataExtractor.hasNext()).thenReturn(true); when(dataExtractor.next()).thenThrow(new IOException()); - DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1); + DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean()); expectThrows(DatafeedJob.ExtractionProblemException.class, () -> datafeedJob.runLookBack(0L, 1000L)); currentTime = 3001; @@ -382,7 +382,7 @@ public class DatafeedJobTests extends ESTestCase { when(dataExtractor.getEndTime()).thenReturn(1000L); - DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1); + DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean()); DatafeedJob.AnalysisProblemException analysisProblemException = expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runLookBack(0L, 1000L)); assertThat(analysisProblemException.shouldStop, is(false)); @@ -411,7 +411,7 @@ public class DatafeedJobTests extends ESTestCase { when(dataExtractor.getEndTime()).thenReturn(1000L); - DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1); + DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean()); DatafeedJob.AnalysisProblemException analysisProblemException = expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runLookBack(0L, 1000L)); assertThat(analysisProblemException.shouldStop, is(true)); @@ -436,7 +436,7 @@ public class DatafeedJobTests extends ESTestCase { currentTime = 60000L; long frequencyMs = 100; long queryDelayMs = 1000; - DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1); + DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1, randomBoolean()); DatafeedJob.AnalysisProblemException analysisProblemException = expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runRealtime()); assertThat(analysisProblemException.shouldStop, is(false)); @@ -448,16 +448,17 @@ public class DatafeedJobTests extends ESTestCase { currentTime = 60000L; long frequencyMs = 100; long queryDelayMs = 1000; - DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1); + DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1, randomBoolean()); DatafeedJob.AnalysisProblemException analysisProblemException = expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runRealtime()); assertThat(analysisProblemException.shouldStop, is(true)); } private DatafeedJob createDatafeedJob(long 
frequencyMs, long queryDelayMs, long latestFinalBucketEndTimeMs, - long latestRecordTimeMs) { + long latestRecordTimeMs, boolean haveSeenDataPreviously) { Supplier currentTimeSupplier = () -> currentTime; return new DatafeedJob(jobId, dataDescription.build(), frequencyMs, queryDelayMs, dataExtractorFactory, timingStatsReporter, - client, auditor, currentTimeSupplier, delayedDataDetector, latestFinalBucketEndTimeMs, latestRecordTimeMs); + client, auditor, currentTimeSupplier, delayedDataDetector, null, latestFinalBucketEndTimeMs, latestRecordTimeMs, + haveSeenDataPreviously); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java index 765c70e00ad..c72b11b772f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManagerTests.java @@ -114,6 +114,7 @@ public class DatafeedManagerTests extends ESTestCase { when(datafeedJob.isRunning()).thenReturn(true); when(datafeedJob.stop()).thenReturn(true); when(datafeedJob.getJobId()).thenReturn(job.getId()); + when(datafeedJob.getMaxEmptySearches()).thenReturn(null); DatafeedJobBuilder datafeedJobBuilder = mock(DatafeedJobBuilder.class); doAnswer(invocationOnMock -> { @SuppressWarnings("rawtypes") @@ -133,7 +134,7 @@ public class DatafeedManagerTests extends ESTestCase { } public void testLookbackOnly_WarnsWhenNoDataIsRetrieved() throws Exception { - when(datafeedJob.runLookBack(0L, 60000L)).thenThrow(new DatafeedJob.EmptyDataCountException(0L)); + when(datafeedJob.runLookBack(0L, 60000L)).thenThrow(new DatafeedJob.EmptyDataCountException(0L, false)); Consumer handler = mockConsumer(); DatafeedTask task = createDatafeedTask("datafeed_id", 0L, 60000L); datafeedManager.run(task, handler); @@ -176,8 +177,8 @@ public class DatafeedManagerTests extends ESTestCase { return mock(Scheduler.ScheduledCancellable.class); }).when(threadPool).schedule(any(), any(), any()); - when(datafeedJob.runLookBack(anyLong(), anyLong())).thenThrow(new DatafeedJob.EmptyDataCountException(0L)); - when(datafeedJob.runRealtime()).thenThrow(new DatafeedJob.EmptyDataCountException(0L)); + when(datafeedJob.runLookBack(anyLong(), anyLong())).thenThrow(new DatafeedJob.EmptyDataCountException(0L, false)); + when(datafeedJob.runRealtime()).thenThrow(new DatafeedJob.EmptyDataCountException(0L, false)); Consumer handler = mockConsumer(); DatafeedTask task = createDatafeedTask("datafeed_id", 0L, null); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/ProblemTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/ProblemTrackerTests.java index bfbd85ca944..d4deb6c6650 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/ProblemTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/ProblemTrackerTests.java @@ -85,7 +85,7 @@ public class ProblemTrackerTests extends ESTestCase { for (int i = 0; i < 9; i++) { problemTracker.reportEmptyDataCount(); } - problemTracker.reportNoneEmptyCount(); + problemTracker.reportNonEmptyDataCount(); Mockito.verifyNoMoreInteractions(auditor); } @@ -94,7 +94,7 @@ public class ProblemTrackerTests extends ESTestCase { for (int i = 0; i < 10; i++) { problemTracker.reportEmptyDataCount(); } - problemTracker.reportNoneEmptyCount(); + 
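// once the empty-data warning has fired, seeing data again should emit the data-seen-again info audit +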
problemTracker.reportNonEmptyDataCount(); verify(auditor).warning("foo", "Datafeed has been retrieving no data for a while"); verify(auditor).info("foo", "Datafeed has started retrieving data again"); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml index 2ae53b56926..75bcbdedf2e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml @@ -181,8 +181,10 @@ setup: "indexes":["index-foo"], "scroll_size": 2000, "frequency": "1m", - "query_delay": "30s" + "query_delay": "30s", + "max_empty_searches": 42 } + - match: { max_empty_searches: 42 } - do: ml.update_datafeed: @@ -192,7 +194,8 @@ setup: "indexes":["index-*"], "scroll_size": 10000, "frequency": "2m", - "query_delay": "0s" + "query_delay": "0s", + "max_empty_searches": -1 } - match: { datafeed_id: "test-datafeed-1" } - match: { job_id: "datafeeds-crud-1" } @@ -200,6 +203,7 @@ setup: - match: { scroll_size: 10000 } - match: { frequency: "2m" } - match: { query_delay: "0s" } + - is_false: max_empty_searches --- "Test update datafeed to point to different job": @@ -364,7 +368,8 @@ setup: } } } - } + }, + "max_empty_searches": -1 } - do: ml.get_datafeeds: @@ -374,6 +379,7 @@ setup: - match: { datafeeds.0.aggregations.histogram_buckets.aggs.@timestamp.max.field: "@timestamp" } - match: { datafeeds.0.aggregations.histogram_buckets.aggs.bytes_in_avg.avg.field: "system.network.in.bytes" } - match: { datafeeds.0.aggregations.histogram_buckets.aggs.non_negative_bytes.bucket_script.buckets_path.bytes: "bytes_in_derivative" } + - is_false: max_empty_searches --- "Test delete datafeed": From 699d4d4c6f89cc39e4979c1a0c9f7cdbfbb94881 Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Mon, 14 Oct 2019 10:19:57 -0600 Subject: [PATCH 09/16] Manage retention of partial snapshots in SLM (#47833) Currently, partial snapshots will eventually build up unless they are manually deleted. Partial snapshots may be useful if there is not a more recent successful snapshot, but should eventually be deleted if they are no longer useful. With this change, partial snapshots are deleted using the following strategy: PARTIAL snapshots will be kept until the configured expire_after period has passed, if present, and then be deleted. If there is no configured expire_after in the retention policy, then they will be deleted if there is at least one more recent successful snapshot from this policy (as they may otherwise be useful for troubleshooting purposes). Partial snapshots are not counted towards either min_count or max_count. 
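To make the strategy concrete, here is a minimal sketch of how the new deletion predicate treats a PARTIAL snapshot when no expire_after is configured. The snapshot instances are hypothetical; the constructor arguments (clock supplier, expire_after, min_count, max_count) follow the shape used by the tests below.

    // Retention with no expire_after, min_count=2, max_count=5; the first
    // argument is the "now" supplier used for expiration checks.
    SnapshotRetentionConfiguration conf =
        new SnapshotRetentionConfiguration(() -> 1, null, 2, 5);

    // olderPartial is a PARTIAL snapshot, newerSuccess a more recent SUCCESS one
    // (both hypothetical SnapshotInfo instances for illustration).
    List<SnapshotInfo> snapshots = Arrays.asList(olderPartial, newerSuccess);

    // With no expire_after but a more recent successful snapshot available,
    // the partial snapshot is now eligible for deletion, exactly as a FAILED
    // snapshot would be.
    boolean eligible = conf.getSnapshotDeletionPredicate(snapshots).test(olderPartial);

Because PARTIAL now sits alongside FAILED in the set of unsuccessful states, partial snapshots are also skipped when counting towards min_count and max_count, which the updated tests below exercise.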
--- .../slm/SnapshotRetentionConfiguration.java | 18 +-- .../SnapshotRetentionConfigurationTests.java | 106 +++++++++++++++--- .../xpack/slm/SnapshotRetentionTask.java | 2 +- .../slm/SLMSnapshotBlockingIntegTests.java | 57 +++++++--- 4 files changed, 143 insertions(+), 40 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotRetentionConfiguration.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotRetentionConfiguration.java index 970b54a7b58..f8d78ed7427 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotRetentionConfiguration.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotRetentionConfiguration.java @@ -23,7 +23,9 @@ import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotState; import java.io.IOException; +import java.util.Arrays; import java.util.Comparator; +import java.util.HashSet; import java.util.List; import java.util.Objects; import java.util.Set; @@ -127,14 +129,15 @@ public class SnapshotRetentionConfiguration implements ToXContentObject, Writeab .mapToLong(SnapshotInfo::startTime) .max() .orElse(Long.MIN_VALUE); + final Set unsuccessfulStates = new HashSet<>(Arrays.asList(SnapshotState.FAILED, SnapshotState.PARTIAL)); return si -> { final String snapName = si.snapshotId().getName(); // First, if there's no expire_after and a more recent successful snapshot, we can delete all the failed ones - if (this.expireAfter == null && SnapshotState.FAILED.equals(si.state()) && newestSuccessfulTimestamp > si.startTime()) { + if (this.expireAfter == null && unsuccessfulStates.contains(si.state()) && newestSuccessfulTimestamp > si.startTime()) { // There's no expire_after and there's a more recent successful snapshot, delete this failed one - logger.trace("[{}]: ELIGIBLE as it is FAILED and there is a more recent successful snapshot", snapName); + logger.trace("[{}]: ELIGIBLE as it is {} and there is a more recent successful snapshot", snapName, si.state()); return true; } @@ -167,13 +170,13 @@ public class SnapshotRetentionConfiguration implements ToXContentObject, Writeab // expiration time if (this.minimumSnapshotCount != null) { if (successfulSnapshotCount <= this.minimumSnapshotCount) - if (SnapshotState.FAILED.equals(si.state()) == false) { + if (unsuccessfulStates.contains(si.state()) == false) { logger.trace("[{}]: INELIGIBLE as there are {} non-failed snapshots ({} total) and {} minimum snapshots needed", snapName, successfulSnapshotCount, totalSnapshotCount, this.minimumSnapshotCount); return false; } else { logger.trace("[{}]: SKIPPING minimum snapshot count check as this snapshot is {} and not counted " + - "towards the minimum snapshot count.", snapName, SnapshotState.FAILED); + "towards the minimum snapshot count.", snapName, si.state()); } } @@ -190,10 +193,11 @@ public class SnapshotRetentionConfiguration implements ToXContentObject, Writeab final Stream successfulSnapsEligibleForExpiration = sortedSnapshots.stream() .filter(snap -> SnapshotState.SUCCESS.equals(snap.state())) .limit(eligibleForExpiration); - final Stream failedSnaps = sortedSnapshots.stream() - .filter(snap -> SnapshotState.FAILED.equals(snap.state())); + final Stream unsucessfulSnaps = sortedSnapshots.stream() + .filter(snap -> unsuccessfulStates.contains(snap.state())); - final Set snapsEligibleForExpiration = Stream.concat(successfulSnapsEligibleForExpiration, failedSnaps) + final Set snapsEligibleForExpiration = Stream + 
.concat(successfulSnapsEligibleForExpiration, unsucessfulSnaps) .collect(Collectors.toSet()); if (snapsEligibleForExpiration.contains(si) == false) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java index 50832553edf..430b076bab2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionConfigurationTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy; import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration; @@ -103,13 +104,21 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { } public void testFailuresDeletedIfExpired() { + assertUnsuccessfulDeletedIfExpired(true); + } + + public void testPartialsDeletedIfExpired() { + assertUnsuccessfulDeletedIfExpired(false); + } + + private void assertUnsuccessfulDeletedIfExpired(boolean failure) { SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration( () -> TimeValue.timeValueDays(1).millis() + 1, TimeValue.timeValueDays(1), null, null); - SnapshotInfo oldInfo = makeFailureInfo(0); + SnapshotInfo oldInfo = makeFailureOrPartial(0, failure); assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(oldInfo)).test(oldInfo), equalTo(true)); - SnapshotInfo newInfo = makeFailureInfo(1); + SnapshotInfo newInfo = makeFailureOrPartial(1, failure); assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(newInfo)).test(newInfo), equalTo(false)); List infos = new ArrayList<>(); @@ -120,10 +129,18 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { } public void testFailuresDeletedIfNoExpiryAndMoreRecentSuccessExists() { + assertUnsuccessfulDeletedIfNoExpiryAndMoreRecentSuccessExists(true); + } + + public void testPartialsDeletedIfNoExpiryAndMoreRecentSuccessExists() { + assertUnsuccessfulDeletedIfNoExpiryAndMoreRecentSuccessExists(false); + } + + private void assertUnsuccessfulDeletedIfNoExpiryAndMoreRecentSuccessExists(boolean failure) { SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 5); SnapshotInfo s1 = makeInfo(1); SnapshotInfo s2 = makeInfo(2); - SnapshotInfo s3 = makeFailureInfo(3); + SnapshotInfo s3 = makeFailureOrPartial(3, failure); SnapshotInfo s4 = makeInfo(4); List infos = Arrays.asList(s1 , s2, s3, s4); @@ -134,12 +151,20 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { } public void testFailuresKeptIfNoExpiryAndNoMoreRecentSuccess() { + assertUnsuccessfulKeptIfNoExpiryAndNoMoreRecentSuccess(true); + } + + public void testPartialsKeptIfNoExpiryAndNoMoreRecentSuccess() { + assertUnsuccessfulKeptIfNoExpiryAndNoMoreRecentSuccess(false); + } + + private void assertUnsuccessfulKeptIfNoExpiryAndNoMoreRecentSuccess(boolean failure) { // Also tests that failures are not counted towards the maximum SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 3); SnapshotInfo s1 = makeInfo(1); SnapshotInfo s2 = makeInfo(2); 
SnapshotInfo s3 = makeInfo(3); - SnapshotInfo s4 = makeFailureInfo(4); + SnapshotInfo s4 = makeFailureOrPartial(4, failure); List infos = Arrays.asList(s1 , s2, s3, s4); assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(false)); @@ -149,11 +174,19 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { } public void testFailuresNotCountedTowardsMaximum() { + assertUnsuccessfulNotCountedTowardsMaximum(true); + } + + public void testPartialsNotCountedTowardsMaximum() { + assertUnsuccessfulNotCountedTowardsMaximum(false); + } + + private void assertUnsuccessfulNotCountedTowardsMaximum(boolean failure) { SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, TimeValue.timeValueDays(1), 2, 2); SnapshotInfo s1 = makeInfo(1); - SnapshotInfo s2 = makeFailureInfo(2); - SnapshotInfo s3 = makeFailureInfo(3); - SnapshotInfo s4 = makeFailureInfo(4); + SnapshotInfo s2 = makeFailureOrPartial(2, failure); + SnapshotInfo s3 = makeFailureOrPartial(3, failure); + SnapshotInfo s4 = makeFailureOrPartial(4, failure); SnapshotInfo s5 = makeInfo(5); List infos = Arrays.asList(s1 , s2, s3, s4, s5); @@ -165,10 +198,18 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { } public void testFailuresNotCountedTowardsMinimum() { + assertUnsuccessfulNotCountedTowardsMinimum(true); + } + + public void testPartialsNotCountedTowardsMinimum() { + assertUnsuccessfulNotCountedTowardsMinimum(false); + } + + private void assertUnsuccessfulNotCountedTowardsMinimum(boolean failure) { SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> TimeValue.timeValueDays(1).millis() + 1, TimeValue.timeValueDays(1), 2, null); SnapshotInfo oldInfo = makeInfo(0); - SnapshotInfo failureInfo = makeFailureInfo( 1); + SnapshotInfo failureInfo = makeFailureOrPartial(1, failure); SnapshotInfo newInfo = makeInfo(2); List infos = new ArrayList<>(); @@ -186,12 +227,14 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { assertThat(conf.getSnapshotDeletionPredicate(infos).test(oldInfo), equalTo(true)); } + public void testMostRecentSuccessfulTimestampIsUsed() { + boolean failureBeforePartial = randomBoolean(); SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 2); SnapshotInfo s1 = makeInfo(1); SnapshotInfo s2 = makeInfo(2); - SnapshotInfo s3 = makeFailureInfo(3); - SnapshotInfo s4 = makeFailureInfo(4); + SnapshotInfo s3 = makeFailureOrPartial(3, failureBeforePartial); + SnapshotInfo s4 = makeFailureOrPartial(4, failureBeforePartial == false); List infos = Arrays.asList(s1 , s2, s3, s4); assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(false)); @@ -204,15 +247,25 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { final Map meta = new HashMap<>(); meta.put(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD, REPO); final int totalShards = between(1,20); - return new SnapshotInfo(new SnapshotId("snap-" + randomAlphaOfLength(3), "uuid"), + SnapshotInfo snapInfo = new SnapshotInfo(new SnapshotId("snap-" + randomAlphaOfLength(3), "uuid"), Collections.singletonList("foo"), startTime, null, - startTime + between(1,10000), + startTime + between(1, 10000), totalShards, new ArrayList<>(), false, meta); + assertThat(snapInfo.state(), equalTo(SnapshotState.SUCCESS)); + return snapInfo; + } + + private SnapshotInfo makeFailureOrPartial(long startTime, boolean failure) { + if (failure) { + return makeFailureInfo(startTime); + } else { + return 
makePartialInfo(startTime); + } } private SnapshotInfo makeFailureInfo(long startTime) { @@ -225,14 +278,39 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase { failures.add(new SnapshotShardFailure("nodeId", new ShardId("index-name", "index-uuid", i), "failed")); } assert failureCount == failures.size(); - return new SnapshotInfo(new SnapshotId("snap-fail-" + randomAlphaOfLength(3), "uuid-fail"), + SnapshotInfo snapInfo = new SnapshotInfo(new SnapshotId("snap-fail-" + randomAlphaOfLength(3), "uuid-fail"), Collections.singletonList("foo-fail"), startTime, "forced-failure", - startTime + between(1,10000), + startTime + between(1, 10000), totalShards, failures, randomBoolean(), meta); + assertThat(snapInfo.state(), equalTo(SnapshotState.FAILED)); + return snapInfo; + } + + private SnapshotInfo makePartialInfo(long startTime) { + final Map meta = new HashMap<>(); + meta.put(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD, REPO); + final int totalShards = between(2,20); + final List failures = new ArrayList<>(); + final int failureCount = between(1,totalShards - 1); + for (int i = 0; i < failureCount; i++) { + failures.add(new SnapshotShardFailure("nodeId", new ShardId("index-name", "index-uuid", i), "failed")); + } + assert failureCount == failures.size(); + SnapshotInfo snapInfo = new SnapshotInfo(new SnapshotId("snap-fail-" + randomAlphaOfLength(3), "uuid-fail"), + Collections.singletonList("foo-fail"), + startTime, + null, + startTime + between(1, 10000), + totalShards, + failures, + randomBoolean(), + meta); + assertThat(snapInfo.state(), equalTo(SnapshotState.PARTIAL)); + return snapInfo; } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index f278f118125..f600f13dc16 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -250,7 +250,7 @@ public class SnapshotRetentionTask implements SchedulerEngine.Listener { @Override public void onResponse(GetSnapshotsResponse resp) { final Set retainableStates = - new HashSet<>(Arrays.asList(SnapshotState.SUCCESS, SnapshotState.FAILED)); + new HashSet<>(Arrays.asList(SnapshotState.SUCCESS, SnapshotState.FAILED, SnapshotState.PARTIAL)); try { snapshots.compute(repository, (k, previousSnaps) -> { if (previousSnaps != null) { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java index b255019f504..5029971e0dc 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SLMSnapshotBlockingIntegTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleAction; import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction; import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.junit.After; +import org.junit.Before; import java.util.Arrays; import java.util.Collection; @@ -60,11 +61,19 @@ import static org.hamcrest.Matchers.greaterThan; */ @TestLogging(value = "org.elasticsearch.snapshots.mockstore:DEBUG", reason = "https://github.com/elastic/elasticsearch/issues/46508") -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) 
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { private static final String REPO = "repo-id"; + @Before + public void ensureClusterNodes() { + logger.info("--> starting enough nodes to ensure we have enough to safely stop for tests"); + internalCluster().startMasterOnlyNodes(2); + internalCluster().startDataOnlyNodes(2); + ensureGreen(); + } + @After public void resetSLMSettings() throws Exception { // Cancel/delete all snapshots @@ -180,7 +189,7 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { logger.info("--> creating policy {}", policyId); createSnapshotPolicy(policyId, "snap", "1 2 3 4 5 ?", REPO, indexName, true, - new SnapshotRetentionConfiguration(TimeValue.timeValueSeconds(0), null, null)); + false, new SnapshotRetentionConfiguration(TimeValue.timeValueSeconds(0), null, null)); // Create a snapshot and wait for it to be complete (need something that can be deleted) final String completedSnapshotName = executePolicy(policyId); @@ -281,20 +290,26 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47937") public void testBasicFailureRetention() throws Exception { + testUnsuccessfulSnapshotRetention(false); + + } + + public void testBasicPartialRetention() throws Exception { + testUnsuccessfulSnapshotRetention(true); + } + + private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Exception { final String indexName = "test-idx"; final String policyId = "test-policy"; + final SnapshotState expectedUnsuccessfulState = partialSuccess ? SnapshotState.PARTIAL : SnapshotState.FAILED; // Setup - logger.info("--> starting two master nodes and two data nodes"); - internalCluster().startMasterOnlyNodes(2); - internalCluster().startDataOnlyNodes(2); - createAndPopulateIndex(indexName); // Create a snapshot repo initializeRepo(REPO); createSnapshotPolicy(policyId, "snap", "1 2 3 4 5 ?", REPO, indexName, true, - new SnapshotRetentionConfiguration(null, 1, 2)); + partialSuccess, new SnapshotRetentionConfiguration(null, 1, 2)); // Create a failed snapshot AtomicReference failedSnapshotName = new AtomicReference<>(); @@ -321,12 +336,17 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { failedSnapshotName.set(snapshotFuture.get().getSnapshotName()); assertNotNull(failedSnapshotName.get()); - logger.info("--> verify that snapshot [{}] failed", failedSnapshotName.get()); + logger.info("--> verify that snapshot [{}] is {}", failedSnapshotName.get(), expectedUnsuccessfulState); assertBusy(() -> { - GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() - .prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get(); - SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); - assertEquals(SnapshotState.FAILED, snapshotInfo.state()); + try { + GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() + .prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get(); + SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); + assertEquals(expectedUnsuccessfulState, snapshotInfo.state()); + } catch (SnapshotMissingException ex) { + logger.info("failed to find snapshot {}, retrying", failedSnapshotName); + throw new AssertionError(ex); + } }); } @@ -370,14 +390,14 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { GetSnapshotsResponse 
snapshotsStatusResponse = client().admin().cluster() .prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); - assertEquals(SnapshotState.FAILED, snapshotInfo.state()); + assertEquals(expectedUnsuccessfulState, snapshotInfo.state()); } // Run retention again and make sure the failure was deleted { logger.info("--> executing SLM retention"); assertAcked(client().execute(ExecuteSnapshotRetentionAction.INSTANCE, new ExecuteSnapshotRetentionAction.Request()).get()); - logger.info("--> waiting for failed snapshot [{}] to be deleted", failedSnapshotName.get()); + logger.info("--> waiting for {} snapshot [{}] to be deleted", expectedUnsuccessfulState, failedSnapshotName.get()); assertBusy(() -> { try { GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() @@ -386,8 +406,8 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { } catch (SnapshotMissingException e) { // This is what we want to happen } - logger.info("--> failed snapshot [{}] has been deleted, checking successful snapshot [{}] still exists", - failedSnapshotName.get(), successfulSnapshotName.get()); + logger.info("--> {} snapshot [{}] has been deleted, checking successful snapshot [{}] still exists", + expectedUnsuccessfulState, failedSnapshotName.get(), successfulSnapshotName.get()); GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster() .prepareGetSnapshots(REPO).setSnapshots(successfulSnapshotName.get()).get(); SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0); @@ -424,15 +444,16 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase { private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String REPO, String indexPattern, boolean ignoreUnavailable) { createSnapshotPolicy(policyName, snapshotNamePattern, schedule, REPO, indexPattern, - ignoreUnavailable, SnapshotRetentionConfiguration.EMPTY); + ignoreUnavailable, false, SnapshotRetentionConfiguration.EMPTY); } private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String REPO, String indexPattern, boolean ignoreUnavailable, - SnapshotRetentionConfiguration retention) { + boolean partialSnapsAllowed, SnapshotRetentionConfiguration retention) { Map snapConfig = new HashMap<>(); snapConfig.put("indices", Collections.singletonList(indexPattern)); snapConfig.put("ignore_unavailable", ignoreUnavailable); + snapConfig.put("partial", partialSnapsAllowed); if (randomBoolean()) { Map metadata = new HashMap<>(); int fieldCount = randomIntBetween(2,5); From e4ea8b46b60e5e4dc7715d781ee0b35a5cf436b7 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 14 Oct 2019 18:25:53 +0200 Subject: [PATCH 10/16] Add Pause/Resume Auto-Follower APIs to High Level REST Client (#48004) This commit adds support for Pause/Resume Auto-Follower APIs to the HLRC, with the documentation. 
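A minimal usage sketch of the new client methods (the client setup and pattern name are hypothetical, and the request constructors are assumed to take the auto-follow pattern name, consistent with the getName() accessor used by the request converters below):

    // Pause the auto-follow pattern named "my_pattern"...
    PauseAutoFollowPatternRequest pauseRequest = new PauseAutoFollowPatternRequest("my_pattern");
    AcknowledgedResponse pauseResponse =
        client.ccr().pauseAutoFollowPattern(pauseRequest, RequestOptions.DEFAULT);

    // ...and resume it later. Async variants taking an ActionListener and
    // returning a Cancellable are added alongside the blocking methods.
    ResumeAutoFollowPatternRequest resumeRequest = new ResumeAutoFollowPatternRequest("my_pattern");
    AcknowledgedResponse resumeResponse =
        client.ccr().resumeAutoFollowPattern(resumeRequest, RequestOptions.DEFAULT);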
Relates #47510 --- .../org/elasticsearch/client/CcrClient.java | 88 +++++++++++++ .../client/CcrRequestConverters.java | 20 +++ .../ccr/PauseAutoFollowPatternRequest.java | 45 +++++++ .../ccr/ResumeAutoFollowPatternRequest.java | 45 +++++++ .../client/CcrRequestConvertersTests.java | 22 ++++ .../documentation/CCRDocumentationIT.java | 120 ++++++++++++++++++ .../ccr/pause_auto_follow_pattern.asciidoc | 32 +++++ .../ccr/resume_auto_follow_pattern.asciidoc | 33 +++++ .../high-level/supported-apis.asciidoc | 4 + 9 files changed, 409 insertions(+) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PauseAutoFollowPatternRequest.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ResumeAutoFollowPatternRequest.java create mode 100644 docs/java-rest/high-level/ccr/pause_auto_follow_pattern.asciidoc create mode 100644 docs/java-rest/high-level/ccr/resume_auto_follow_pattern.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java index daa7c54b7fe..6064da69265 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java @@ -30,10 +30,12 @@ import org.elasticsearch.client.ccr.FollowStatsResponse; import org.elasticsearch.client.ccr.ForgetFollowerRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse; +import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PutFollowRequest; import org.elasticsearch.client.ccr.PutFollowResponse; +import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest; import org.elasticsearch.client.ccr.ResumeFollowRequest; import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; @@ -410,6 +412,92 @@ public final class CcrClient { ); } + /** + * Pauses an auto follow pattern. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse pauseAutoFollowPattern(PauseAutoFollowPatternRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + request, + CcrRequestConverters::pauseAutoFollowPattern, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet() + ); + } + + /** + * Asynchronously pauses an auto follow pattern. + * + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public Cancellable pauseAutoFollowPatternAsync(PauseAutoFollowPatternRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + request, + CcrRequestConverters::pauseAutoFollowPattern, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Resumes an auto follow pattern. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse resumeAutoFollowPattern(ResumeAutoFollowPatternRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + request, + CcrRequestConverters::resumeAutoFollowPattern, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet() + ); + } + + /** + * Asynchronously resumes an auto follow pattern. + * + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public Cancellable resumeAutoFollowPatternAsync(ResumeAutoFollowPatternRequest request, + RequestOptions options, + ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity( + request, + CcrRequestConverters::resumeAutoFollowPattern, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Gets all CCR stats. 
* diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java index 8272e5d73bb..efcb5b8073b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java @@ -29,9 +29,11 @@ import org.elasticsearch.client.ccr.FollowInfoRequest; import org.elasticsearch.client.ccr.FollowStatsRequest; import org.elasticsearch.client.ccr.ForgetFollowerRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PutFollowRequest; +import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest; import org.elasticsearch.client.ccr.ResumeFollowRequest; import org.elasticsearch.client.ccr.UnfollowRequest; @@ -118,6 +120,24 @@ final class CcrRequestConverters { return new Request(HttpGet.METHOD_NAME, endpoint); } + static Request pauseAutoFollowPattern(PauseAutoFollowPatternRequest pauseAutoFollowPatternRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ccr", "auto_follow") + .addPathPart(pauseAutoFollowPatternRequest.getName()) + .addPathPartAsIs("pause") + .build(); + return new Request(HttpPost.METHOD_NAME, endpoint); + } + + static Request resumeAutoFollowPattern(ResumeAutoFollowPatternRequest resumeAutoFollowPatternRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ccr", "auto_follow") + .addPathPart(resumeAutoFollowPatternRequest.getName()) + .addPathPartAsIs("resume") + .build(); + return new Request(HttpPost.METHOD_NAME, endpoint); + } + static Request getCcrStats(CcrStatsRequest ccrStatsRequest) { String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_ccr", "stats") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PauseAutoFollowPatternRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PauseAutoFollowPatternRequest.java new file mode 100644 index 00000000000..e713e16566d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PauseAutoFollowPatternRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +/** + * Request class for pause auto follow pattern api. 
+ */ +public final class PauseAutoFollowPatternRequest implements Validatable { + + private final String name; + + /** + * Pause auto follow pattern with the specified name + * + * @param name The name of the auto follow pattern to pause + */ + public PauseAutoFollowPatternRequest(String name) { + this.name = Objects.requireNonNull(name); + } + + public String getName() { + return name; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ResumeAutoFollowPatternRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ResumeAutoFollowPatternRequest.java new file mode 100644 index 00000000000..642763a1c3f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ResumeAutoFollowPatternRequest.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +/** + * Request class for resume auto follow pattern api. 
+ */ +public final class ResumeAutoFollowPatternRequest implements Validatable { + + private final String name; + + /** + * Resume auto follow pattern with the specified name + * + * @param name The name of the auto follow pattern to resume + */ + public ResumeAutoFollowPatternRequest(String name) { + this.name = Objects.requireNonNull(name); + } + + public String getName() { + return name; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java index 393b7b9ba6f..ef506466b25 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java @@ -31,9 +31,11 @@ import org.elasticsearch.client.ccr.FollowInfoRequest; import org.elasticsearch.client.ccr.FollowStatsRequest; import org.elasticsearch.client.ccr.ForgetFollowerRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; +import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PutFollowRequest; +import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest; import org.elasticsearch.client.ccr.ResumeFollowRequest; import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.common.unit.ByteSizeValue; @@ -143,6 +145,26 @@ public class CcrRequestConvertersTests extends ESTestCase { assertThat(result.getEntity(), nullValue()); } + public void testPauseAutofollowPattern() throws Exception { + PauseAutoFollowPatternRequest pauseAutoFollowPatternRequest = new PauseAutoFollowPatternRequest(randomAlphaOfLength(4)); + + Request result = CcrRequestConverters.pauseAutoFollowPattern(pauseAutoFollowPatternRequest); + assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + pauseAutoFollowPatternRequest.getName() + "/pause")); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + + public void testResumeAutofollowPattern() throws Exception { + ResumeAutoFollowPatternRequest resumeAutoFollowPatternRequest = new ResumeAutoFollowPatternRequest(randomAlphaOfLength(4)); + + Request result = CcrRequestConverters.resumeAutoFollowPattern(resumeAutoFollowPatternRequest); + assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + resumeAutoFollowPatternRequest.getName() + "/resume")); + assertThat(result.getParameters().size(), equalTo(0)); + assertThat(result.getEntity(), nullValue()); + } + public void testGetCcrStats() throws Exception { CcrStatsRequest ccrStatsRequest = new CcrStatsRequest(); Request result = CcrRequestConverters.getCcrStats(ccrStatsRequest); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java index b78507112b7..8595675792b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java @@ -44,10 +44,12 @@ import 
org.elasticsearch.client.ccr.GetAutoFollowPatternRequest; import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse; import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse.Pattern; import org.elasticsearch.client.ccr.IndicesFollowStats; +import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest; import org.elasticsearch.client.ccr.PutFollowRequest; import org.elasticsearch.client.ccr.PutFollowResponse; +import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest; import org.elasticsearch.client.ccr.ResumeFollowRequest; import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; @@ -681,6 +683,124 @@ public class CCRDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testPauseAutoFollowPattern() throws Exception { + final RestHighLevelClient client = highLevelClient(); + { + final PutAutoFollowPatternRequest putRequest = + new PutAutoFollowPatternRequest("my_pattern", "local", Collections.singletonList("logs-*")); + AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT); + assertThat(putResponse.isAcknowledged(), is(true)); + } + + // tag::ccr-pause-auto-follow-pattern-request + PauseAutoFollowPatternRequest request = + new PauseAutoFollowPatternRequest("my_pattern"); // <1> + // end::ccr-pause-auto-follow-pattern-request + + // tag::ccr-pause-auto-follow-pattern-execute + AcknowledgedResponse response = client.ccr() + .pauseAutoFollowPattern(request, RequestOptions.DEFAULT); + // end::ccr-pause-auto-follow-pattern-execute + + // tag::ccr-pause-auto-follow-pattern-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::ccr-pause-auto-follow-pattern-response + + // tag::ccr-pause-auto-follow-pattern-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { // <1> + boolean paused = response.isAcknowledged(); + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ccr-pause-auto-follow-pattern-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ccr-pause-auto-follow-pattern-execute-async + client.ccr().pauseAutoFollowPatternAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::ccr-pause-auto-follow-pattern-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + // Cleanup: + { + DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern"); + AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT); + assertThat(deleteResponse.isAcknowledged(), is(true)); + } + } + + public void testResumeAutoFollowPattern() throws Exception { + final RestHighLevelClient client = highLevelClient(); + { + final PutAutoFollowPatternRequest putRequest = + new PutAutoFollowPatternRequest("my_pattern", "local", Collections.singletonList("logs-*")); + AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT); + assertThat(putResponse.isAcknowledged(), is(true)); + + final PauseAutoFollowPatternRequest pauseRequest = new PauseAutoFollowPatternRequest("my_pattern"); + AcknowledgedResponse 
pauseResponse = client.ccr().pauseAutoFollowPattern(pauseRequest, RequestOptions.DEFAULT); + assertThat(pauseResponse.isAcknowledged(), is(true)); + } + + // tag::ccr-resume-auto-follow-pattern-request + ResumeAutoFollowPatternRequest request = + new ResumeAutoFollowPatternRequest("my_pattern"); // <1> + // end::ccr-resume-auto-follow-pattern-request + + // tag::ccr-resume-auto-follow-pattern-execute + AcknowledgedResponse response = client.ccr() + .resumeAutoFollowPattern(request, RequestOptions.DEFAULT); + // end::ccr-resume-auto-follow-pattern-execute + + // tag::ccr-resume-auto-follow-pattern-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::ccr-resume-auto-follow-pattern-response + + // tag::ccr-resume-auto-follow-pattern-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { // <1> + boolean resumed = response.isAcknowledged(); + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ccr-resume-auto-follow-pattern-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ccr-resume-auto-follow-pattern-execute-async + client.ccr().resumeAutoFollowPatternAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::ccr-resume-auto-follow-pattern-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + // Cleanup: + { + DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern"); + AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT); + assertThat(deleteResponse.isAcknowledged(), is(true)); + } + } + public void testGetCCRStats() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/docs/java-rest/high-level/ccr/pause_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/pause_auto_follow_pattern.asciidoc new file mode 100644 index 00000000000..2d40e4e9c4a --- /dev/null +++ b/docs/java-rest/high-level/ccr/pause_auto_follow_pattern.asciidoc @@ -0,0 +1,32 @@ +-- +:api: ccr-pause-auto-follow-pattern +:request: PauseAutoFollowPatternRequest +:response: AcknowledgedResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Pause Auto Follow Pattern API + +[id="{upid}-{api}-request"] +==== Request + +The Pause Auto Follow Pattern API allows you to pause an existing auto follow pattern. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The name of the auto follow pattern. + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the pause auto follow pattern request was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the pause auto follow pattern request was acknowledged. 
+ +include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ccr/resume_auto_follow_pattern.asciidoc b/docs/java-rest/high-level/ccr/resume_auto_follow_pattern.asciidoc new file mode 100644 index 00000000000..8bc24ead277 --- /dev/null +++ b/docs/java-rest/high-level/ccr/resume_auto_follow_pattern.asciidoc @@ -0,0 +1,33 @@ +-- +:api: ccr-resume-auto-follow-pattern +:request: ResumeAutoFollowPatternRequest +:response: AcknowledgedResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Resume Auto Follow Pattern API + +[id="{upid}-{api}-request"] +==== Request + +The Resume Auto Follow Pattern API allows you to resume the activity + for a paused auto follow pattern. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The name of the auto follow pattern. + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the resume auto follow pattern request was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the resume auto follow pattern request was acknowledged. + +include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 2bb1dc9da88..de5570e22d5 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -529,6 +529,8 @@ The Java High Level REST Client supports the following CCR APIs: * <<{upid}-ccr-put-auto-follow-pattern>> * <<{upid}-ccr-delete-auto-follow-pattern>> * <<{upid}-ccr-get-auto-follow-pattern>> +* <<{upid}-ccr-pause-auto-follow-pattern>> +* <<{upid}-ccr-resume-auto-follow-pattern>> * <<{upid}-ccr-get-stats>> * <<{upid}-ccr-get-follow-stats>> * <<{upid}-ccr-get-follow-info>> @@ -541,6 +543,8 @@ include::ccr/forget_follower.asciidoc[] include::ccr/put_auto_follow_pattern.asciidoc[] include::ccr/delete_auto_follow_pattern.asciidoc[] include::ccr/get_auto_follow_pattern.asciidoc[] +include::ccr/pause_auto_follow_pattern.asciidoc[] +include::ccr/resume_auto_follow_pattern.asciidoc[] include::ccr/get_stats.asciidoc[] include::ccr/get_follow_stats.asciidoc[] include::ccr/get_follow_info.asciidoc[] From 170266765b9a5c2fdf30188fa3998904e5711477 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Mon, 14 Oct 2019 12:29:11 -0400 Subject: [PATCH 11/16] [DOCS] Reformat docs for several snapshot lifecycle policy APIs (#47998) --- docs/reference/ilm/apis/slm-api.asciidoc | 116 ++++++++++++++++++----- 1 file changed, 93 insertions(+), 23 deletions(-) diff --git a/docs/reference/ilm/apis/slm-api.asciidoc b/docs/reference/ilm/apis/slm-api.asciidoc index 731f24e3dfc..c686d21c91d 100644 --- a/docs/reference/ilm/apis/slm-api.asciidoc +++ b/docs/reference/ilm/apis/slm-api.asciidoc @@ -317,21 +317,42 @@ GET /_slm/policy [[slm-api-execute]] -=== Execute Snapshot Lifecycle Policy API +=== Execute snapshot lifecycle policy API +++++ +Execute snapshot lifecycle policy +++++ + +Executes a snapshot lifecycle policy, immediately creating a snapshot +without waiting for the scheduled creation time.
+ + +[[slm-api-execute-request]] +==== {api-request-title} + +`PUT /_slm/policy//_execute` + + +[[slm-api-execute-desc]] +==== {api-description-title} Sometimes it can be useful to immediately execute a snapshot based on policy, perhaps before an upgrade or before performing other maintenance on indices. The execute snapshot policy API allows you to perform a snapshot immediately without waiting for a policy's scheduled invocation. -==== Path Parameters -`policy_id` (required):: - (string) Id of the policy to execute +[[slm-api-execute-path-params]] +==== {api-path-parms-title} -==== Example +``:: +(Required, string) +ID of the snapshot lifecycle policy to execute. -To take an immediate snapshot using a policy, use the following + +[[slm-api-execute-example]] +==== {api-examples-title} + +To take an immediate snapshot using a policy, use the following request: [source,console] -------------------------------------------------- @@ -339,7 +360,7 @@ POST /_slm/policy/daily-snapshots/_execute -------------------------------------------------- // TEST[skip:we can't easily handle snapshots from docs tests] -This API will immediately return with the generated snapshot name +This API returns the following response with the generated snapshot name: [source,console-result] -------------------------------------------------- @@ -450,8 +471,7 @@ POST /_slm/policy/daily-snapshots/_execute -------------------------------------------------- // TESTRESPONSE[skip:we can't handle snapshots in docs tests] -Now retriving the policy shows that the policy has successfully been executed: - +Now retrieving the policy shows that the policy has successfully been executed: [source,console] -------------------------------------------------- @@ -514,12 +534,22 @@ Which now includes the successful snapshot information: It is a good idea to test policies using the execute API to ensure they work. [[slm-get-stats]] -=== Get Snapshot Lifecycle Stats API +=== Get snapshot lifecycle stats API +++++ +Get snapshot lifecycle stats +++++ -SLM stores statistics on a global and per-policy level about actions taken. These stats can be -retrieved by using the following API: +Returns global and policy-level statistics about actions taken by {slm}. -==== Example + +[[slm-api-stats-request]] +==== {api-request-title} + +`GET /_slm/stats` + + +[[slm-api-stats-example]] +==== {api-examples-title} [source,console] -------------------------------------------------- @@ -527,7 +557,7 @@ GET /_slm/stats -------------------------------------------------- // TEST[continued] -Which returns a response similar to: +The API returns the following response: [source,js] -------------------------------------------------- @@ -546,19 +576,40 @@ Which returns a response similar to: -------------------------------------------------- // TESTRESPONSE[s/runs": 13/runs": $body.retention_runs/ s/_failed": 0/_failed": $body.retention_failed/ s/_timed_out": 0/_timed_out": $body.retention_timed_out/ s/"1.4s"/$body.retention_deletion_time/ s/1404/$body.retention_deletion_time_millis/ s/total_snapshots_taken": 1/total_snapshots_taken": $body.total_snapshots_taken/ s/total_snapshots_failed": 1/total_snapshots_failed": $body.total_snapshots_failed/ s/"policy_stats": [.*]/"policy_stats": $body.policy_stats/] + [[slm-api-delete]] -=== Delete Snapshot Lifecycle Policy API +=== Delete snapshot lifecycle policy API +++++ +Delete snapshot lifecycle policy +++++ + +Deletes an existing snapshot lifecycle policy. 
+ + +[[slm-api-delete-request]] +==== {api-request-title} + +`DELETE /_slm/policy/` + + +[[slm-api-delete-desc]] +==== {api-description-title} A policy can be deleted by issuing a delete request with the policy id. Note that this prevents any future snapshots from being taken, but does not cancel any currently ongoing snapshots or remove any previously taken snapshots. -==== Path Parameters -`policy_id` (optional):: - (string) Id of the policy to remove. +[[slm-api-delete-path-params]] +==== {api-path-parms-title} -==== Example +``:: +(Required, string) +ID of the snapshot lifecycle policy to delete. + + +[[slm-api-delete-example]] +==== {api-examples-title} [source,console] -------------------------------------------------- @@ -566,23 +617,42 @@ DELETE /_slm/policy/daily-snapshots -------------------------------------------------- // TEST[continued] + [[slm-api-execute-retention]] -=== Execute Snapshot Lifecycle Retention API +=== Execute snapshot lifecycle retention API +++++ +Execute snapshot lifecycle retention +++++ + +Deletes any expired snapshots based on lifecycle policy retention rules. + + +[[slm-api-execute-retention-request]] +==== {api-request-title} + +`POST /_slm/_execute_retention` + + +[[slm-api-execute-retention-desc]] +==== {api-description-title} While Snapshot Lifecycle Management retention is usually invoked through the global cluster settings for its schedule, it can sometimes be useful to invoke a retention run to expunge expired snapshots immediately. This API allows you to run a one-off retention run. -==== Example -To immediately start snapshot retention, use the following +[[slm-api-execute-retention-example]] +==== {api-examples-title} + +To immediately start snapshot retention, use the following request: [source,console] -------------------------------------------------- POST /_slm/_execute_retention -------------------------------------------------- -This API will immediately return, as retention will be run asynchronously in the background: +This API returns the following response as retention runs asynchronously in the +background: [source,console-result] -------------------------------------------------- From f6f5efe141ef9626d324a76655269531bceb0149 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Mon, 14 Oct 2019 12:46:16 -0500 Subject: [PATCH 12/16] Add cloudId builder to the HLRC (#47868) Elastic cloud has a concept of a cloud Id. This Id is a base64 encoded url, split up into a few parts. This commit allows the user to pass in a cloud id now, which is translated to a HttpHost that is defined by the encoded parts therein. 
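As a worked example of the decoding described above, using the illustrative values from the unit test below (the human-readable prefix before the colon is optional and ignored by the builder):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;
    import org.elasticsearch.client.RestClient;
    import org.elasticsearch.client.RestClientBuilder;

    // A cloudId payload is base64("host$esId$kibanaId"), optionally prefixed with "humanReadableName:".
    String toEncode = "us-east-1.aws.found.io" + "$" + "elasticsearch" + "$" + "kibana";
    String cloudId = "myCluster:" + Base64.getEncoder().encodeToString(toEncode.getBytes(StandardCharsets.UTF_8));

    // The builder strips the prefix, decodes the payload, and targets
    // https://elasticsearch.us-east-1.aws.found.io:443
    RestClientBuilder builder = RestClient.builder(cloudId);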
--- .../org/elasticsearch/client/RestClient.java | 30 ++++++++++++++++ .../client/RestClientBuilderTests.java | 34 +++++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java index a31732d7427..b791c9578a5 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java @@ -56,6 +56,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Base64; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -73,6 +74,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; +import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Collections.singletonList; /** @@ -119,6 +121,34 @@ public class RestClient implements Closeable { setNodes(nodes); } + /** + * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation. + * Creates a new builder instance and sets the nodes that the client will send requests to. + * + * @param cloudId a valid elastic cloud cloudId that will route to a cluster. The cloudId is located in + * the user console https://cloud.elastic.co and will resemble a string like the following + * optionalHumanReadableName:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRlbGFzdGljc2VhcmNoJGtpYmFuYQ== + */ + public static RestClientBuilder builder(String cloudId) { + // there is an optional first portion of the cloudId that is a human readable string, but it is not used. + if (cloudId.contains(":")) { + if (cloudId.indexOf(":") == cloudId.length() - 1) { + throw new IllegalStateException("cloudId " + cloudId + " must begin with a human readable identifier followed by a colon"); + } + cloudId = cloudId.substring(cloudId.indexOf(":") + 1); + } + + String decoded = new String(Base64.getDecoder().decode(cloudId), UTF_8); + // once decoded the parts are separated by a $ character + String[] decodedParts = decoded.split("\\$"); + if (decodedParts.length != 3) { + throw new IllegalStateException("cloudId " + cloudId + " did not decode to a cluster identifier correctly"); + } + + String url = decodedParts[1] + "." + decodedParts[0]; + return builder(new HttpHost(url, 443, "https")); + } + /** * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation. * Creates a new builder instance and sets the hosts that the client will send requests to. 
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 1e16d94076a..748dbfe2842 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -26,8 +26,10 @@ import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.message.BasicHeader; import java.io.IOException; +import java.util.Base64; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; @@ -159,6 +161,38 @@ public class RestClientBuilderTests extends RestClientTestCase { } } + public void testBuildCloudId() throws IOException { + String host = "us-east-1.aws.found.io"; + String esId = "elasticsearch"; + String kibanaId = "kibana"; + String toEncode = host + "$" + esId + "$" + kibanaId; + String encodedId = Base64.getEncoder().encodeToString(toEncode.getBytes(UTF8)); + assertNotNull(RestClient.builder(encodedId)); + assertNotNull(RestClient.builder("humanReadable:" + encodedId)); + + String badId = Base64.getEncoder().encodeToString("foo$bar".getBytes(UTF8)); + try { + RestClient.builder(badId); + fail("should have failed"); + } catch (IllegalStateException e) { + assertEquals("cloudId " + badId + " did not decode to a cluster identifier correctly", e.getMessage()); + } + + try { + RestClient.builder(badId + ":"); + fail("should have failed"); + } catch (IllegalStateException e) { + assertEquals("cloudId " + badId + ":" + " must begin with a human readable identifier followed by a colon", e.getMessage()); + } + + RestClient client = RestClient.builder(encodedId).build(); + assertThat(client.getNodes().size(), equalTo(1)); + assertThat(client.getNodes().get(0).getHost().getHostName(), equalTo(esId + "." 
+ host)); + assertThat(client.getNodes().get(0).getHost().getPort(), equalTo(443)); + assertThat(client.getNodes().get(0).getHost().getSchemeName(), equalTo("https")); + client.close(); + } + public void testSetPathPrefixNull() { try { RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(null); From 17d8ee9a9ce1cfc495fa73f273397cd1e9db39b3 Mon Sep 17 00:00:00 2001 From: Hendrik Muhs Date: Mon, 14 Oct 2019 20:11:14 +0200 Subject: [PATCH 13/16] [Transform] wait for deprecated index shards to get active (#47997) wait for deprecated index shards to get active --- .../transform/transforms/TransformPersistentTasksExecutor.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java index 64b299182d2..39e2ac1fbc0 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java @@ -109,7 +109,8 @@ public class TransformPersistentTasksExecutor extends PersistentTasksExecutor unavailableIndices = new ArrayList<>(indices.length); for (String index : indices) { IndexRoutingTable routingTable = clusterState.getRoutingTable().index(index); From 5a4745ae69a342582b51a8801b443d529862b0fd Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 14 Oct 2019 13:40:28 -0500 Subject: [PATCH 14/16] Re-enable Watcher full cluster restart test (#47950) (#48000) This test is believed to be fixed by #43939 closes #40178 --- .../org/elasticsearch/xpack/restart/FullClusterRestartIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index ae7cc95c1ab..a2ff126298a 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -135,7 +135,6 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { } @SuppressWarnings("unchecked") - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40178") public void testWatcher() throws Exception { if (isRunningAgainstOldCluster()) { logger.info("Adding a watch on old cluster {}", getOldClusterVersion()); From 8814bf07f1dd391828e0ddb3fcf5f95dfc5b4a2e Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Mon, 14 Oct 2019 13:39:08 -0600 Subject: [PATCH 15/16] Upgrade to Netty 4.1.42 (#48015) Upgrades the netty version. 
--- buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.38.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.42.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.38.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.42.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.38.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.42.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.38.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.42.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.38.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.42.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.38.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.42.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.38.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.42.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.38.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.42.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.38.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.42.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.38.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.42.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.38.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.42.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.38.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.42.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.38.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-resolver-4.1.42.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.38.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.42.Final.jar.sha1 | 1 + 29 files changed, 15 insertions(+), 15 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.38.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.42.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.38.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.42.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.38.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.42.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.38.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.42.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.38.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.42.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.38.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.42.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.38.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.42.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.38.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.42.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.38.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.42.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.38.Final.jar.sha1 create mode 100644 
plugins/transport-nio/licenses/netty-codec-http-4.1.42.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.38.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.42.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.38.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.42.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.38.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.42.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.38.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.42.Final.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 9aec1cbfe2b..404ea31a95e 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -21,7 +21,7 @@ slf4j = 1.6.2 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 4.5.1 -netty = 4.1.38.Final +netty = 4.1.42.Final joda = 2.10.3 # when updating this version, you need to ensure compatibility with: diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.38.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.38.Final.jar.sha1 deleted file mode 100644 index 5f99086039f..00000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d16cf15d29c409987cecde77407fbb6f1e16d262 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.42.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..b357e167ad8 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +6e6fc9178d1f1401aa0d6b843341efb91720f2cd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.38.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.38.Final.jar.sha1 deleted file mode 100644 index 2db183d46ca..00000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ccfbdfc727cbf702350572a0b12fe92185ebf162 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.42.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..04155e0ef8d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +b1d5ed85a558fbbadc2783f869fbd0adcd32b07b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.38.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.38.Final.jar.sha1 deleted file mode 100644 index 0a75cdf9957..00000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d55b3cdb74cd140d262de96987ebd369125a64c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.42.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..a995760e2a3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +5f71267aa784d0e6c5ec09fb988339d244b205a0 \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-common-4.1.38.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.38.Final.jar.sha1 deleted file mode 100644 index e6ccf03b910..00000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6f8aae763f743d91fb1ba1e9011dae0ef4f6ff34 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.42.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..a6e599124e2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +e02700b574d3a0e2100308f971f0753ac8700e7c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.38.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.38.Final.jar.sha1 deleted file mode 100644 index 10e2094ebbd..00000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ebf1f2bd0dad5e16aa1fc48d32e5dbe507b38d53 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.42.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..a33d11cbe58 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +fc6546be5df552d9729f008d8d41a6dee28127aa \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.38.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.38.Final.jar.sha1 deleted file mode 100644 index 01512737b8d..00000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b00be4aa309e9b56e498191aa8c73e4f393759ed \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.42.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..51713e58cb5 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +ccaacf418a9e486b65e82c47bed66439119c5fdb \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.38.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.38.Final.jar.sha1 deleted file mode 100644 index 197ce53e032..00000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd8b612d5daa42d1be3bb3203e4857597d5db79b \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.42.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..bb0a654f35a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +857502e863c02c829fdafea61c3fda6bda01d0af \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.38.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.38.Final.jar.sha1 deleted file mode 100644 index 5f99086039f..00000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d16cf15d29c409987cecde77407fbb6f1e16d262 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.42.Final.jar.sha1 
b/plugins/transport-nio/licenses/netty-buffer-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..b357e167ad8 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +6e6fc9178d1f1401aa0d6b843341efb91720f2cd \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.38.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.38.Final.jar.sha1 deleted file mode 100644 index 2db183d46ca..00000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ccfbdfc727cbf702350572a0b12fe92185ebf162 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.42.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..04155e0ef8d --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +b1d5ed85a558fbbadc2783f869fbd0adcd32b07b \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.38.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.38.Final.jar.sha1 deleted file mode 100644 index 0a75cdf9957..00000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d55b3cdb74cd140d262de96987ebd369125a64c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.42.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..a995760e2a3 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +5f71267aa784d0e6c5ec09fb988339d244b205a0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.38.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.38.Final.jar.sha1 deleted file mode 100644 index e6ccf03b910..00000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6f8aae763f743d91fb1ba1e9011dae0ef4f6ff34 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.42.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..a6e599124e2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +e02700b574d3a0e2100308f971f0753ac8700e7c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.38.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.38.Final.jar.sha1 deleted file mode 100644 index 10e2094ebbd..00000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ebf1f2bd0dad5e16aa1fc48d32e5dbe507b38d53 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.42.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..a33d11cbe58 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +fc6546be5df552d9729f008d8d41a6dee28127aa \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.38.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.38.Final.jar.sha1 deleted file mode 100644 index 01512737b8d..00000000000 --- 
a/plugins/transport-nio/licenses/netty-resolver-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b00be4aa309e9b56e498191aa8c73e4f393759ed \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.42.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..51713e58cb5 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +ccaacf418a9e486b65e82c47bed66439119c5fdb \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.38.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.38.Final.jar.sha1 deleted file mode 100644 index 197ce53e032..00000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.38.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd8b612d5daa42d1be3bb3203e4857597d5db79b \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.42.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.42.Final.jar.sha1 new file mode 100644 index 00000000000..bb0a654f35a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.42.Final.jar.sha1 @@ -0,0 +1 @@ +857502e863c02c829fdafea61c3fda6bda01d0af \ No newline at end of file From 300ddfa3c19bcf625c9ef45a86e0cf8dbae1c81c Mon Sep 17 00:00:00 2001 From: Gordon Brown Date: Mon, 14 Oct 2019 16:56:31 -0600 Subject: [PATCH 16/16] SLM Start/Stop HLRC and docs (#47966) This commit adds HLRC support and documentation for the SLM Start and Stop APIs, as well as updating existing documentation where appropriate. This commit also ensures that the SLM APIs are properly included in the HLRC documentation. --- .../client/IndexLifecycleClient.java | 101 +++++++++ .../IndexLifecycleRequestConverters.java | 42 ++++ ...pshotLifecycleManagementStatusRequest.java | 25 +++ .../client/slm/StartSLMRequest.java | 25 +++ .../client/slm/StopSLMRequest.java | 25 +++ .../documentation/ILMDocumentationIT.java | 211 +++++++++++++++--- ...pshot_lifecycle_management_status.asciidoc | 36 +++ ...art_snapshot_lifecycle_management.asciidoc | 36 +++ ...top_snapshot_lifecycle_management.asciidoc | 38 ++++ .../high-level/supported-apis.asciidoc | 29 +++ docs/reference/ilm/apis/slm-api.asciidoc | 167 +++++++++++++- 11 files changed, 704 insertions(+), 31 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecycleManagementStatusRequest.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/slm/StartSLMRequest.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/slm/StopSLMRequest.java create mode 100644 docs/java-rest/high-level/ilm/snapshot_lifecycle_management_status.asciidoc create mode 100644 docs/java-rest/high-level/ilm/start_snapshot_lifecycle_management.asciidoc create mode 100644 docs/java-rest/high-level/ilm/stop_snapshot_lifecycle_management.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java index 54f381acb09..2fe3411ca46 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -43,6 +43,9 @@ import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyResponse; import 
org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest; import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse; import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest; +import org.elasticsearch.client.slm.StartSLMRequest; +import org.elasticsearch.client.slm.StopSLMRequest; import java.io.IOException; @@ -540,4 +543,102 @@ public class IndexLifecycleClient { return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecycleStats, options, GetSnapshotLifecycleStatsResponse::fromXContent, listener, emptySet()); } + + /** + * Start the Snapshot Lifecycle Management feature. + * See
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/java-rest-high-ilm-slm-start-slm.html
for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse startSLM(StartSLMRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::startSLM, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously start the Snapshot Lifecycle Management feature. + * See
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/java-rest-high-ilm-slm-start-slm.html
for more. + * @param request the request + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public Cancellable startSLMAsync(StartSLMRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::startSLM, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Stop the Snapshot Lifecycle Management feature. + * See
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/java-rest-high-ilm-slm-stop-slm.html
for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse stopSLM(StopSLMRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::stopSLM, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously stop the Snapshot Lifecycle Management feature. + * See
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/java-rest-high-ilm-slm-stop-slm.html
for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public Cancellable stopSLMAsync(StopSLMRequest request, RequestOptions options, ActionListener listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::stopSLM, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Get the status of Snapshot Lifecycle Management. + * See
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/java-rest-high-ilm-slm-status.html
for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public LifecycleManagementStatusResponse getSLMStatus(SnapshotLifecycleManagementStatusRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::snapshotLifecycleManagementStatus, + options, LifecycleManagementStatusResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously get the status of Snapshot Lifecycle Management. + * See
+     *  https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
+     *  java-rest-high-ilm-slm-status.html
+     * 
for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return cancellable that may be used to cancel the request + */ + public Cancellable getSLMStatusAsync(SnapshotLifecycleManagementStatusRequest request, RequestOptions options, + ActionListener<LifecycleManagementStatusResponse> listener) { + return restHighLevelClient.performRequestAsyncAndParseEntity(request, + IndexLifecycleRequestConverters::snapshotLifecycleManagementStatus, options, LifecycleManagementStatusResponse::fromXContent, + listener, emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java index 5c9212b598b..f4095077570 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java @@ -38,6 +38,9 @@ import org.elasticsearch.client.slm.ExecuteSnapshotLifecycleRetentionRequest; import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest; import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest; +import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest; +import org.elasticsearch.client.slm.StartSLMRequest; +import org.elasticsearch.client.slm.StopSLMRequest; import org.elasticsearch.common.Strings; import java.io.IOException; @@ -239,4 +242,43 @@ final class IndexLifecycleRequestConverters { request.addParameters(params.asMap()); return request; } + + static Request snapshotLifecycleManagementStatus(SnapshotLifecycleManagementStatusRequest snapshotLifecycleManagementStatusRequest) { + Request request = new Request(HttpGet.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_slm") + .addPathPartAsIs("status") + .build()); + RequestConverters.Params params = new RequestConverters.Params(); + params.withMasterTimeout(snapshotLifecycleManagementStatusRequest.masterNodeTimeout()); + params.withTimeout(snapshotLifecycleManagementStatusRequest.timeout()); + request.addParameters(params.asMap()); + return request; + } + + static Request startSLM(StartSLMRequest startSLMRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_slm") + .addPathPartAsIs("start") + .build()); + RequestConverters.Params params = new RequestConverters.Params(); + params.withMasterTimeout(startSLMRequest.masterNodeTimeout()); + params.withTimeout(startSLMRequest.timeout()); + request.addParameters(params.asMap()); + return request; + } + + static Request stopSLM(StopSLMRequest stopSLMRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_slm") + .addPathPartAsIs("stop") + .build()); + RequestConverters.Params params = new RequestConverters.Params(); + params.withMasterTimeout(stopSLMRequest.masterNodeTimeout()); + params.withTimeout(stopSLMRequest.timeout()); + request.addParameters(params.asMap()); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecycleManagementStatusRequest.java
b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecycleManagementStatusRequest.java new file mode 100644 index 00000000000..684c55cb487 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/SnapshotLifecycleManagementStatusRequest.java @@ -0,0 +1,25 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.slm; + +import org.elasticsearch.client.TimedRequest; + +public class SnapshotLifecycleManagementStatusRequest extends TimedRequest { +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/StartSLMRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/StartSLMRequest.java new file mode 100644 index 00000000000..6281acf2115 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/StartSLMRequest.java @@ -0,0 +1,25 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.slm; + +import org.elasticsearch.client.TimedRequest; + +public class StartSLMRequest extends TimedRequest { +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/StopSLMRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/StopSLMRequest.java new file mode 100644 index 00000000000..8d6a97cc8ba --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/slm/StopSLMRequest.java @@ -0,0 +1,25 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.slm; + +import org.elasticsearch.client.TimedRequest; + +public class StopSLMRequest extends TimedRequest { +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java index cdf1f5b6be8..687da75fc82 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java @@ -64,10 +64,13 @@ import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest; import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse; import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest; import org.elasticsearch.client.slm.SnapshotInvocationRecord; +import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest; import org.elasticsearch.client.slm.SnapshotLifecyclePolicy; import org.elasticsearch.client.slm.SnapshotLifecyclePolicyMetadata; import org.elasticsearch.client.slm.SnapshotLifecycleStats; import org.elasticsearch.client.slm.SnapshotRetentionConfiguration; +import org.elasticsearch.client.slm.StartSLMRequest; +import org.elasticsearch.client.slm.StopSLMRequest; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -460,7 +463,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } - public void testStartStopStatus() throws Exception { + public void testILMStartStopStatus() throws Exception { RestHighLevelClient client = highLevelClient(); stopILM(client); @@ -776,7 +779,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { assertTrue(response.isAcknowledged()); //////// PUT - // tag::slm-put-snapshot-lifecycle-policy + // tag::slm-put-snapshot-lifecycle-policy-request Map<String, Object> config = new HashMap<>(); config.put("indices", Collections.singletonList("idx")); SnapshotRetentionConfiguration retention = new SnapshotRetentionConfiguration(TimeValue.timeValueDays(30), 2, 10); SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( "policy_id", "name", "1 2 3 * * ?", "my_repository", config, retention); PutSnapshotLifecyclePolicyRequest request = new PutSnapshotLifecyclePolicyRequest(policy); - // end::slm-put-snapshot-lifecycle-policy + // end::slm-put-snapshot-lifecycle-policy-request // tag::slm-put-snapshot-lifecycle-policy-execute AcknowledgedResponse resp = client.indexLifecycle() .putSnapshotLifecyclePolicy(request, RequestOptions.DEFAULT); // end::slm-put-snapshot-lifecycle-policy-execute @@ -815,16 +818,16 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { // tag::slm-put-snapshot-lifecycle-policy-execute-async client.indexLifecycle().putSnapshotLifecyclePolicyAsync(request, - RequestOptions.DEFAULT, putListener); + RequestOptions.DEFAULT, putListener); // <1> // end::slm-put-snapshot-lifecycle-policy-execute-async //////// GET - // tag::slm-get-snapshot-lifecycle-policy + // tag::slm-get-snapshot-lifecycle-policy-request GetSnapshotLifecyclePolicyRequest
getAllRequest = new GetSnapshotLifecyclePolicyRequest(); // <1> GetSnapshotLifecyclePolicyRequest getRequest = new GetSnapshotLifecyclePolicyRequest("policy_id"); // <2> - // end::slm-get-snapshot-lifecycle-policy + // end::slm-get-snapshot-lifecycle-policy-request // tag::slm-get-snapshot-lifecycle-policy-execute GetSnapshotLifecyclePolicyResponse getResponse = client.indexLifecycle() .getSnapshotLifecyclePolicy(getAllRequest, RequestOptions.DEFAULT); // end::slm-get-snapshot-lifecycle-policy-execute @@ -851,7 +854,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { // tag::slm-get-snapshot-lifecycle-policy-execute-async client.indexLifecycle().getSnapshotLifecyclePolicyAsync(getRequest, - RequestOptions.DEFAULT, getListener); + RequestOptions.DEFAULT, getListener); // <1> // end::slm-get-snapshot-lifecycle-policy-execute-async assertThat(getResponse.getPolicies().size(), equalTo(1)); @@ -879,10 +882,10 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); //////// EXECUTE - // tag::slm-execute-snapshot-lifecycle-policy + // tag::slm-execute-snapshot-lifecycle-policy-request ExecuteSnapshotLifecyclePolicyRequest executeRequest = new ExecuteSnapshotLifecyclePolicyRequest("policy_id"); // <1> - // end::slm-execute-snapshot-lifecycle-policy + // end::slm-execute-snapshot-lifecycle-policy-request // tag::slm-execute-snapshot-lifecycle-policy-execute ExecuteSnapshotLifecyclePolicyResponse executeResponse = client.indexLifecycle() .executeSnapshotLifecyclePolicy(executeRequest, RequestOptions.DEFAULT); // end::slm-execute-snapshot-lifecycle-policy-execute @@ -937,7 +940,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { // tag::slm-execute-snapshot-lifecycle-policy-execute-async client.indexLifecycle() .executeSnapshotLifecyclePolicyAsync(executeRequest, - RequestOptions.DEFAULT, executeListener); + RequestOptions.DEFAULT, executeListener); // <1> // end::slm-execute-snapshot-lifecycle-policy-execute-async latch.await(5, TimeUnit.SECONDS); @@ -958,42 +961,50 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { greaterThanOrEqualTo(1L)); //////// DELETE - // tag::slm-delete-snapshot-lifecycle-policy + // tag::slm-delete-snapshot-lifecycle-policy-request DeleteSnapshotLifecyclePolicyRequest deleteRequest = new DeleteSnapshotLifecyclePolicyRequest("policy_id"); // <1> - // end::slm-delete-snapshot-lifecycle-policy + // end::slm-delete-snapshot-lifecycle-policy-request // tag::slm-delete-snapshot-lifecycle-policy-execute AcknowledgedResponse deleteResp = client.indexLifecycle() .deleteSnapshotLifecyclePolicy(deleteRequest, RequestOptions.DEFAULT); // end::slm-delete-snapshot-lifecycle-policy-execute + + // tag::slm-delete-snapshot-lifecycle-policy-response + boolean deleteAcknowledged = deleteResp.isAcknowledged(); // <1> + // end::slm-delete-snapshot-lifecycle-policy-response + assertTrue(deleteResp.isAcknowledged()); - ActionListener<AcknowledgedResponse> deleteListener = new ActionListener<AcknowledgedResponse>() { - @Override - public void onResponse(AcknowledgedResponse resp) { - // no-op - } + // tag::slm-delete-snapshot-lifecycle-policy-execute-listener + ActionListener<AcknowledgedResponse> deleteListener = + new ActionListener<AcknowledgedResponse>() { + @Override + public void onResponse(AcknowledgedResponse resp) { + boolean deleteAcknowledged = resp.isAcknowledged(); // <1> + } - @Override - public void onFailure(Exception e) { - // no-op - } - }; + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::slm-delete-snapshot-lifecycle-policy-execute-listener // tag::slm-delete-snapshot-lifecycle-policy-execute-async client.indexLifecycle() .deleteSnapshotLifecyclePolicyAsync(deleteRequest, - RequestOptions.DEFAULT, deleteListener); + RequestOptions.DEFAULT,
deleteListener); // <1> // end::slm-delete-snapshot-lifecycle-policy-execute-async assertTrue(deleteResp.isAcknowledged()); //////// EXECUTE RETENTION - // tag::slm-execute-snapshot-lifecycle-retention + // tag::slm-execute-snapshot-lifecycle-retention-request ExecuteSnapshotLifecycleRetentionRequest req = new ExecuteSnapshotLifecycleRetentionRequest(); - // end::slm-execute-snapshot-lifecycle-retention + // end::slm-execute-snapshot-lifecycle-retention-request // tag::slm-execute-snapshot-lifecycle-retention-execute AcknowledgedResponse retentionResp = client.indexLifecycle() .executeSnapshotLifecycleRetention(req, RequestOptions.DEFAULT); // end::slm-execute-snapshot-lifecycle-retention-execute @@ -1006,7 +1017,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { final boolean acked = retentionResp.isAcknowledged(); // end::slm-execute-snapshot-lifecycle-retention-response - // tag::slm-execute-snapshot-lifecycle-policy-execute-listener + // tag::slm-execute-snapshot-lifecycle-retention-execute-listener ActionListener<AcknowledgedResponse> retentionListener = new ActionListener<AcknowledgedResponse>() { @Override @@ -1024,7 +1035,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { // tag::slm-execute-snapshot-lifecycle-retention-execute-async client.indexLifecycle() .executeSnapshotLifecycleRetentionAsync(req, - RequestOptions.DEFAULT, retentionListener); + RequestOptions.DEFAULT, retentionListener); // <1> // end::slm-execute-snapshot-lifecycle-retention-execute-async } @@ -1051,6 +1062,152 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { }); } + public void testSLMStartStopStatus() throws Exception { + RestHighLevelClient client = highLevelClient(); + + stopSLM(client); + + // tag::slm-status-request + SnapshotLifecycleManagementStatusRequest request = + new SnapshotLifecycleManagementStatusRequest(); + // end::slm-status-request + + // Check that SLM has stopped + { + // tag::slm-status-execute + LifecycleManagementStatusResponse response = + client.indexLifecycle() + .getSLMStatus(request, RequestOptions.DEFAULT); + // end::slm-status-execute + + // tag::slm-status-response + OperationMode operationMode = response.getOperationMode(); // <1> + // end::slm-status-response + + assertThat(operationMode, Matchers.either(equalTo(OperationMode.STOPPING)).or(equalTo(OperationMode.STOPPED))); + } + + startSLM(client); + + // tag::slm-status-execute-listener + ActionListener<LifecycleManagementStatusResponse> listener = + new ActionListener<LifecycleManagementStatusResponse>() { + @Override + public void onResponse( + LifecycleManagementStatusResponse response) { + OperationMode operationMode = response + .getOperationMode(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::slm-status-execute-listener + + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::slm-status-execute-async + client.indexLifecycle().getSLMStatusAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::slm-status-execute-async + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + // Check that SLM is running again + LifecycleManagementStatusResponse response = + client.indexLifecycle() + .getSLMStatus(request, RequestOptions.DEFAULT); + + OperationMode operationMode = response.getOperationMode(); + assertEquals(OperationMode.RUNNING, operationMode); + } + + private void stopSLM(RestHighLevelClient client) throws IOException, InterruptedException { + // tag::slm-stop-slm-request + StopSLMRequest request = new StopSLMRequest(); + // end::slm-stop-slm-request + + // tag::slm-stop-slm-execute + AcknowledgedResponse response = client.indexLifecycle() + .stopSLM(request,
RequestOptions.DEFAULT); + // end::slm-stop-slm-execute + + // tag::slm-stop-slm-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::slm-stop-slm-response + assertTrue(acknowledged); + + // tag::slm-stop-slm-execute-listener + ActionListener<AcknowledgedResponse> listener = + new ActionListener<AcknowledgedResponse>() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::slm-stop-slm-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::slm-stop-slm-execute-async + client.indexLifecycle().stopSLMAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::slm-stop-slm-execute-async + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + private void startSLM(RestHighLevelClient client) throws IOException, InterruptedException { + // tag::slm-start-slm-request + StartSLMRequest request1 = new StartSLMRequest(); + // end::slm-start-slm-request + + // tag::slm-start-slm-execute + AcknowledgedResponse response = client.indexLifecycle() + .startSLM(request1, RequestOptions.DEFAULT); + // end::slm-start-slm-execute + + // tag::slm-start-slm-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::slm-start-slm-response + + assertTrue(acknowledged); + + // tag::slm-start-slm-execute-listener + ActionListener<AcknowledgedResponse> listener = + new ActionListener<AcknowledgedResponse>() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::slm-start-slm-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::slm-start-slm-execute-async + client.indexLifecycle().startSLMAsync(request1, + RequestOptions.DEFAULT, listener); // <1> + // end::slm-start-slm-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + static Map<String, Object> toMap(Response response) throws IOException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/docs/java-rest/high-level/ilm/snapshot_lifecycle_management_status.asciidoc b/docs/java-rest/high-level/ilm/snapshot_lifecycle_management_status.asciidoc new file mode 100644 index 00000000000..ae6986711bc --- /dev/null +++ b/docs/java-rest/high-level/ilm/snapshot_lifecycle_management_status.asciidoc @@ -0,0 +1,36 @@ +-- +:api: slm-status +:request: SnapshotLifecycleManagementStatusRequest +:response: LifecycleManagementStatusResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Snapshot Lifecycle Management Status API + + +[id="{upid}-{api}-request"] +==== Request + +The Snapshot Lifecycle Management Status API allows you to retrieve the status +of Snapshot Lifecycle Management. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates the status of Snapshot Lifecycle Management.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The returned status can be `RUNNING`, `STOPPING`, or `STOPPED`. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ilm/start_snapshot_lifecycle_management.asciidoc b/docs/java-rest/high-level/ilm/start_snapshot_lifecycle_management.asciidoc new file mode 100644 index 00000000000..b359f237ea5 --- /dev/null +++ b/docs/java-rest/high-level/ilm/start_snapshot_lifecycle_management.asciidoc @@ -0,0 +1,36 @@ +-- +:api: slm-start-slm +:request: StartSLMRequest +:response: AcknowledgedResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Start Snapshot Lifecycle Management API + + +[id="{upid}-{api}-request"] +==== Request + +The Start Snapshot Lifecycle Management API allows you to start Snapshot +Lifecycle Management if it has previously been stopped. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the request to start Snapshot Lifecycle +Management was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the request to start Snapshot Lifecycle Management was +acknowledged. + +include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ilm/stop_snapshot_lifecycle_management.asciidoc b/docs/java-rest/high-level/ilm/stop_snapshot_lifecycle_management.asciidoc new file mode 100644 index 00000000000..3f54341d430 --- /dev/null +++ b/docs/java-rest/high-level/ilm/stop_snapshot_lifecycle_management.asciidoc @@ -0,0 +1,38 @@ +-- +:api: slm-stop-slm +:request: StopSLMRequest +:response: AcknowledgedResponse +-- +[role="xpack"] +[id="{upid}-{api}"] +=== Stop Snapshot Lifecycle Management API + + +[id="{upid}-{api}-request"] +==== Request + +The Stop Snapshot Lifecycle Management API allows you to stop Snapshot Lifecycle +Management temporarily. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the request to stop Snapshot +Lifecycle Management was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the request to stop Snapshot Lifecycle Management was +acknowledged. + +include::../execution.asciidoc[]
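Taken together, the new client methods and request classes form a complete start/stop/status workflow for SLM. The following is a minimal synchronous sketch, not part of the patch itself: it assumes a configured `RestHighLevelClient` named `client`, and it assumes the status types (`LifecycleManagementStatusResponse`, `OperationMode`) live in the client's `indexlifecycle` package as they do for the corresponding ILM APIs (error handling omitted):

["source","java"]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.AcknowledgedResponse;
import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse;
import org.elasticsearch.client.indexlifecycle.OperationMode;
import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest;
import org.elasticsearch.client.slm.StartSLMRequest;
import org.elasticsearch.client.slm.StopSLMRequest;

public class SlmMaintenanceSketch {

    // Pause SLM around a maintenance window, checking the mode in between.
    static void pauseSlmForMaintenance(RestHighLevelClient client) throws IOException {
        // Ask the cluster to stop SLM; this acknowledges the request, it does
        // not wait for SLM to finish stopping.
        AcknowledgedResponse stopResponse = client.indexLifecycle()
            .stopSLM(new StopSLMRequest(), RequestOptions.DEFAULT);

        // The mode may still be STOPPING while in-flight operations finish.
        LifecycleManagementStatusResponse status = client.indexLifecycle()
            .getSLMStatus(new SnapshotLifecycleManagementStatusRequest(),
                RequestOptions.DEFAULT);
        OperationMode mode = status.getOperationMode(); // RUNNING, STOPPING, or STOPPED

        // ... perform maintenance once the mode reaches STOPPED ...

        // Restart SLM when maintenance is complete.
        AcknowledgedResponse startResponse = client.indexLifecycle()
            .startSLM(new StartSLMRequest(), RequestOptions.DEFAULT);
    }
}
--------------------------------------------------

On the wire, each of these calls is a bodyless request against a fixed `_slm` endpoint (`POST _slm/stop`, `GET _slm/status`, `POST _slm/start`) carrying only the `master_timeout` and `timeout` parameters, as built by the `IndexLifecycleRequestConverters` methods above.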
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index de5570e22d5..a6975a97326 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -579,6 +579,35 @@ include::ilm/lifecycle_management_status.asciidoc[] include::ilm/retry_lifecycle_policy.asciidoc[] include::ilm/remove_lifecycle_policy_from_index.asciidoc[] +[role="xpack"] +== Snapshot Lifecycle Management APIs + +:upid: {mainid}-ilm +:doc-tests-file: {doc-tests}/ILMDocumentationIT.java + +The Java High Level REST Client supports the following Snapshot Lifecycle +Management APIs: + +* <<{upid}-slm-put-snapshot-lifecycle-policy>> +* <<{upid}-slm-delete-snapshot-lifecycle-policy>> +* <<{upid}-slm-get-snapshot-lifecycle-policy>> +* <<{upid}-slm-start-slm>> +* <<{upid}-slm-stop-slm>> +* <<{upid}-slm-status>> +* <<{upid}-slm-execute-snapshot-lifecycle-policy>> +* <<{upid}-slm-execute-snapshot-lifecycle-retention>> + + +include::ilm/put_snapshot_lifecycle_policy.asciidoc[] +include::ilm/delete_snapshot_lifecycle_policy.asciidoc[] +include::ilm/get_snapshot_lifecycle_policy.asciidoc[] +include::ilm/start_snapshot_lifecycle_management.asciidoc[] +include::ilm/stop_snapshot_lifecycle_management.asciidoc[] +include::ilm/snapshot_lifecycle_management_status.asciidoc[] +include::ilm/execute_snapshot_lifecycle_policy.asciidoc[] +include::ilm/execute_snapshot_lifecycle_retention.asciidoc[] + + [role="xpack"] [[transform_apis]] == {transform-cap} APIs diff --git a/docs/reference/ilm/apis/slm-api.asciidoc b/docs/reference/ilm/apis/slm-api.asciidoc index c686d21c91d..44c11cae525 100644 --- a/docs/reference/ilm/apis/slm-api.asciidoc +++ b/docs/reference/ilm/apis/slm-api.asciidoc @@ -15,10 +15,9 @@ SLM policy management is split into three different CRUD APIs, a way to put or u policies, a way to retrieve policies, and a way to delete unwanted policies, as well as a separate API for immediately invoking a snapshot based on a policy. -Since SLM falls under the same category as ILM, it is stopped and started by -using the <<start-stop-ilm,start and stop>> ILM APIs. It is, however, managed -by a different enable setting. To disable SLM's functionality, set the cluster -setting `xpack.slm.enabled` to `false` in elasticsearch.yml. +SLM can be stopped temporarily and restarted using the <<slm-stop,Stop SLM>> and +<<slm-start,Start SLM>> APIs. To disable SLM's functionality entirely, set the +cluster setting `xpack.slm.enabled` to `false` in elasticsearch.yml. [[slm-api-put]] === Put snapshot lifecycle policy API @@ -661,3 +660,163 @@ background: } -------------------------------------------------- +[[slm-stop]] +=== Stop Snapshot Lifecycle Management API + +[subs="attributes"] +++++ +Stop Snapshot Lifecycle Management +++++ + +Stop the Snapshot Lifecycle Management (SLM) plugin. + +[[slm-stop-request]] +==== {api-request-title} + +`POST /_slm/stop` + +[[slm-stop-desc]] +==== {api-description-title} + +Halts all snapshot lifecycle management operations and stops the SLM plugin. +This is useful when you are performing maintenance on the cluster and need to +prevent SLM from performing any actions on your indices. Note that this API does +not stop any snapshots that are currently in progress, and that snapshots can +still be taken manually via the snapshot APIs even +when SLM is stopped. + +The API returns as soon as the stop request has been acknowledged, but the +plugin might continue to run until in-progress operations complete and the plugin +can be safely stopped. Use the <<slm-get-status,Get Status>> API to see +if SLM is running.
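For example, polling the status endpoint immediately after a stop request may still report the intermediate mode before it settles on `STOPPED` (a sketch; the exact mode depends on in-flight operations):

[source,console]
--------------------------------------------------
GET _slm/status
--------------------------------------------------

[source,console-result]
--------------------------------------------------
{
  "operation_mode": "STOPPING"
}
--------------------------------------------------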
+ +==== Request Parameters + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +==== Authorization + +You must have the `manage_slm` cluster privilege to use this API. +For more information, see <<security-privileges>>. + +[[slm-stop-example]] +==== {api-examples-title} + +Stops the SLM plugin. + +[source,console] +-------------------------------------------------- +POST _slm/stop +-------------------------------------------------- +// TEST[continued] + +If the request does not encounter errors, you receive the following result: + +[source,console-result] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- + +[[slm-start]] +=== Start Snapshot Lifecycle Management API + +[subs="attributes"] +++++ +Start Snapshot Lifecycle Management +++++ + +Start the Snapshot Lifecycle Management (SLM) plugin. + +[[slm-start-request]] +==== {api-request-title} + +`POST /_slm/start` + +[[slm-start-desc]] +==== {api-description-title} + +Starts the SLM plugin if it is currently stopped. SLM is started +automatically when the cluster is formed. Restarting SLM is only +necessary if it has been stopped using the <<slm-stop,Stop SLM API>>. + +==== Request Parameters + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +==== Authorization + +You must have the `manage_slm` cluster privilege to use this API. +For more information, see <<security-privileges>>. + +[[slm-start-example]] +==== {api-examples-title} + +Starts the SLM plugin. + +[source,console] +-------------------------------------------------- +POST _slm/start +-------------------------------------------------- +// TEST[continued] + +If the request succeeds, you receive the following result: + +[source,console-result] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- + +[[slm-get-status]] +=== Get Snapshot Lifecycle Management status API + +[subs="attributes"] +++++ +Get Snapshot Lifecycle Management status +++++ + +Retrieves the current Snapshot Lifecycle Management (SLM) status. + +[[slm-get-status-request]] +==== {api-request-title} + +`GET /_slm/status` + +[[slm-get-status-desc]] +==== {api-description-title} + +Returns the status of the SLM plugin. The `operation_mode` field in the +response shows one of three states: `RUNNING`, `STOPPING`, +or `STOPPED`. You can change the status of the SLM plugin with the +<<slm-start,Start SLM>> and <<slm-stop,Stop SLM>> APIs. + +==== Request Parameters + +include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms] + +==== Authorization + +You must have the `manage_slm` or `read_slm` cluster privilege (or both) to use this API. +For more information, see <<security-privileges>>. + +[[slm-get-status-example]] +==== {api-examples-title} + +Gets the SLM plugin status. + +[source,console] +-------------------------------------------------- +GET _slm/status +-------------------------------------------------- + +If the request succeeds, the body of the response shows the operation mode: + +[source,console-result] +-------------------------------------------------- +{ + "operation_mode": "RUNNING" +} +--------------------------------------------------
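Putting the three endpoints together, a typical maintenance session follows a stop, check, start, check sequence. A sketch of the whole cycle (intermediate responses elided; the final status check should eventually report `RUNNING`):

[source,console]
--------------------------------------------------
POST _slm/stop

GET _slm/status

POST _slm/start

GET _slm/status
--------------------------------------------------

[source,console-result]
--------------------------------------------------
{
  "operation_mode": "RUNNING"
}
--------------------------------------------------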