Merge remote-tracking branch 'es/7.x' into enrich-7.x

Martijn van Groningen 2019-10-15 07:23:47 +02:00
commit cc4b6c43b3
101 changed files with 1987 additions and 252 deletions


@@ -21,7 +21,7 @@ slf4j = 1.6.2
# when updating the JNA version, also update the version in buildSrc/build.gradle
jna = 4.5.1
-netty = 4.1.38.Final
+netty = 4.1.42.Final
joda = 2.10.3
# when updating this version, you need to ensure compatibility with:


@@ -30,10 +30,12 @@ import org.elasticsearch.client.ccr.FollowStatsResponse;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.PutFollowResponse;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
@@ -410,6 +412,92 @@ public final class CcrClient {
);
}
/**
* Pauses an auto follow pattern.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html">
* the docs</a> for more.
*
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse pauseAutoFollowPattern(PauseAutoFollowPatternRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(
request,
CcrRequestConverters::pauseAutoFollowPattern,
options,
AcknowledgedResponse::fromXContent,
Collections.emptySet()
);
}
/**
* Asynchronously pauses an auto follow pattern.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html">
* the docs</a> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable pauseAutoFollowPatternAsync(PauseAutoFollowPatternRequest request,
RequestOptions options,
ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(
request,
CcrRequestConverters::pauseAutoFollowPattern,
options,
AcknowledgedResponse::fromXContent,
listener,
Collections.emptySet());
}
/**
* Resumes an auto follow pattern.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html">
* the docs</a> for more.
*
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse resumeAutoFollowPattern(ResumeAutoFollowPatternRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(
request,
CcrRequestConverters::resumeAutoFollowPattern,
options,
AcknowledgedResponse::fromXContent,
Collections.emptySet()
);
}
/**
* Asynchronously resumes an auto follow pattern.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html">
* the docs</a> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable resumeAutoFollowPatternAsync(ResumeAutoFollowPatternRequest request,
RequestOptions options,
ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(
request,
CcrRequestConverters::resumeAutoFollowPattern,
options,
AcknowledgedResponse::fromXContent,
listener,
Collections.emptySet());
}
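
The pause/resume pair can be driven straight from the high-level client. A minimal usage sketch, not part of this commit; the host, pattern name, and try-with-resources wiring are illustrative assumptions:

    try (RestHighLevelClient client = new RestHighLevelClient(
            RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
        // Stop the pattern from auto-following newly created leader indices.
        AcknowledgedResponse paused = client.ccr().pauseAutoFollowPattern(
            new PauseAutoFollowPatternRequest("logs-pattern"), RequestOptions.DEFAULT);
        // Pick the pattern back up once maintenance on the follower side is done.
        AcknowledgedResponse resumed = client.ccr().resumeAutoFollowPattern(
            new ResumeAutoFollowPatternRequest("logs-pattern"), RequestOptions.DEFAULT);
    }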
/**
* Gets all CCR stats.
*


@@ -29,9 +29,11 @@ import org.elasticsearch.client.ccr.FollowInfoRequest;
import org.elasticsearch.client.ccr.FollowStatsRequest;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
@@ -118,6 +120,24 @@ final class CcrRequestConverters {
return new Request(HttpGet.METHOD_NAME, endpoint);
}
static Request pauseAutoFollowPattern(PauseAutoFollowPatternRequest pauseAutoFollowPatternRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_ccr", "auto_follow")
.addPathPart(pauseAutoFollowPatternRequest.getName())
.addPathPartAsIs("pause")
.build();
return new Request(HttpPost.METHOD_NAME, endpoint);
}
static Request resumeAutoFollowPattern(ResumeAutoFollowPatternRequest resumeAutoFollowPatternRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_ccr", "auto_follow")
.addPathPart(resumeAutoFollowPatternRequest.getName())
.addPathPartAsIs("resume")
.build();
return new Request(HttpPost.METHOD_NAME, endpoint);
}
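
For a pattern named logs, these converters produce POST /_ccr/auto_follow/logs/pause and POST /_ccr/auto_follow/logs/resume, with no request body and no query parameters, which is what the converter tests further down assert.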
static Request getCcrStats(CcrStatsRequest ccrStatsRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_ccr", "stats")


@@ -43,6 +43,9 @@ import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyResponse;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse;
import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest;
import org.elasticsearch.client.slm.StartSLMRequest;
import org.elasticsearch.client.slm.StopSLMRequest;
import java.io.IOException;
@@ -540,4 +543,102 @@ public class IndexLifecycleClient {
return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecycleStats,
options, GetSnapshotLifecycleStatsResponse::fromXContent, listener, emptySet());
}
/**
* Start the Snapshot Lifecycle Management feature.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-start-slm.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse startSLM(StartSLMRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::startSLM, options,
AcknowledgedResponse::fromXContent, emptySet());
}
/**
* Asynchronously start the Snapshot Lifecycle Management feature.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-start-slm.html
* </pre> for more.
* @param request the request
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable startSLMAsync(StartSLMRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::startSLM, options,
AcknowledgedResponse::fromXContent, listener, emptySet());
}
/**
* Stop the Snapshot Lifecycle Management feature.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-stop-slm.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse stopSLM(StopSLMRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::stopSLM, options,
AcknowledgedResponse::fromXContent, emptySet());
}
/**
* Asynchronously stop the Snapshot Lifecycle Management feature.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-stop-slm.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable stopSLMAsync(StopSLMRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::stopSLM, options,
AcknowledgedResponse::fromXContent, listener, emptySet());
}
/**
* Get the status of Snapshot Lifecycle Management.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-status.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public LifecycleManagementStatusResponse getSLMStatus(SnapshotLifecycleManagementStatusRequest request,
RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::snapshotLifecycleManagementStatus,
options, LifecycleManagementStatusResponse::fromXContent, emptySet());
}
/**
* Asynchronously get the status of Snapshot Lifecycle Management.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-status.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable getSLMStatusAsync(SnapshotLifecycleManagementStatusRequest request, RequestOptions options,
ActionListener<LifecycleManagementStatusResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(request,
IndexLifecycleRequestConverters::snapshotLifecycleManagementStatus, options, LifecycleManagementStatusResponse::fromXContent,
listener, emptySet());
}
}
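
Taken together, the new methods let a caller stop SLM, inspect the operation mode, and start it again. A minimal sketch, not part of this commit; client is assumed to be an already-built RestHighLevelClient:

    // Halt snapshot lifecycle management temporarily.
    AcknowledgedResponse stopped = client.indexLifecycle()
        .stopSLM(new StopSLMRequest(), RequestOptions.DEFAULT);
    // The reported mode moves through STOPPING to STOPPED.
    LifecycleManagementStatusResponse status = client.indexLifecycle()
        .getSLMStatus(new SnapshotLifecycleManagementStatusRequest(), RequestOptions.DEFAULT);
    OperationMode mode = status.getOperationMode();
    // Resume snapshot lifecycle management.
    AcknowledgedResponse started = client.indexLifecycle()
        .startSLM(new StartSLMRequest(), RequestOptions.DEFAULT);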


@@ -38,6 +38,9 @@ import org.elasticsearch.client.slm.ExecuteSnapshotLifecycleRetentionRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest;
import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest;
import org.elasticsearch.client.slm.StartSLMRequest;
import org.elasticsearch.client.slm.StopSLMRequest;
import org.elasticsearch.common.Strings;
import java.io.IOException;
@@ -239,4 +242,43 @@ final class IndexLifecycleRequestConverters {
request.addParameters(params.asMap());
return request;
}
static Request snapshotLifecycleManagementStatus(SnapshotLifecycleManagementStatusRequest snapshotLifecycleManagementStatusRequest) {
Request request = new Request(HttpGet.METHOD_NAME,
new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_slm")
.addPathPartAsIs("status")
.build());
RequestConverters.Params params = new RequestConverters.Params();
params.withMasterTimeout(snapshotLifecycleManagementStatusRequest.masterNodeTimeout());
params.withTimeout(snapshotLifecycleManagementStatusRequest.timeout());
request.addParameters(params.asMap());
return request;
}
static Request startSLM(StartSLMRequest startSLMRequest) {
Request request = new Request(HttpPost.METHOD_NAME,
new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_slm")
.addPathPartAsIs("start")
.build());
RequestConverters.Params params = new RequestConverters.Params();
params.withMasterTimeout(startSLMRequest.masterNodeTimeout());
params.withTimeout(startSLMRequest.timeout());
request.addParameters(params.asMap());
return request;
}
static Request stopSLM(StopSLMRequest stopSLMRequest) {
Request request = new Request(HttpPost.METHOD_NAME,
new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_slm")
.addPathPartAsIs("stop")
.build());
RequestConverters.Params params = new RequestConverters.Params();
params.withMasterTimeout(stopSLMRequest.masterNodeTimeout());
params.withTimeout(stopSLMRequest.timeout());
request.addParameters(params.asMap());
return request;
}
}
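
On the wire these three converters amount to GET /_slm/status, POST /_slm/start, and POST /_slm/stop, with the request's master timeout and timeout carried as the master_timeout and timeout query parameters.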


@@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ccr;
import org.elasticsearch.client.Validatable;
import java.util.Objects;
/**
* Request class for the pause auto follow pattern API.
*/
public final class PauseAutoFollowPatternRequest implements Validatable {
private final String name;
/**
* Pause auto follow pattern with the specified name
*
* @param name The name of the auto follow pattern to pause
*/
public PauseAutoFollowPatternRequest(String name) {
this.name = Objects.requireNonNull(name);
}
public String getName() {
return name;
}
}


@@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ccr;
import org.elasticsearch.client.Validatable;
import java.util.Objects;
/**
* Request class for the resume auto follow pattern API.
*/
public final class ResumeAutoFollowPatternRequest implements Validatable {
private final String name;
/**
* Resume auto follow pattern with the specified name
*
* @param name The name of the auto follow pattern to resume
*/
public ResumeAutoFollowPatternRequest(String name) {
this.name = Objects.requireNonNull(name);
}
public String getName() {
return name;
}
}


@@ -62,6 +62,7 @@ public class DatafeedConfig implements ToXContentObject {
public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields");
public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config");
public static final ParseField DELAYED_DATA_CHECK_CONFIG = new ParseField("delayed_data_check_config");
public static final ParseField MAX_EMPTY_SEARCHES = new ParseField("max_empty_searches");
public static final ConstructingObjectParser<Builder, Void> PARSER = new ConstructingObjectParser<>(
"datafeed_config", true, a -> new Builder((String)a[0], (String)a[1]));
@@ -88,6 +89,7 @@ public class DatafeedConfig implements ToXContentObject {
PARSER.declareInt(Builder::setScrollSize, SCROLL_SIZE);
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG);
PARSER.declareObject(Builder::setDelayedDataCheckConfig, DelayedDataCheckConfig.PARSER, DELAYED_DATA_CHECK_CONFIG);
PARSER.declareInt(Builder::setMaxEmptySearches, MAX_EMPTY_SEARCHES);
}
private static BytesReference parseBytes(XContentParser parser) throws IOException {
@@ -107,11 +109,12 @@ public class DatafeedConfig implements ToXContentObject {
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private final DelayedDataCheckConfig delayedDataCheckConfig;
private final Integer maxEmptySearches;
private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, BytesReference query,
BytesReference aggregations, List<SearchSourceBuilder.ScriptField> scriptFields, Integer scrollSize,
-ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) {
+ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig,
+Integer maxEmptySearches) {
this.id = id;
this.jobId = jobId;
this.queryDelay = queryDelay;
@@ -123,6 +126,7 @@ public class DatafeedConfig implements ToXContentObject {
this.scrollSize = scrollSize;
this.chunkingConfig = chunkingConfig;
this.delayedDataCheckConfig = delayedDataCheckConfig;
this.maxEmptySearches = maxEmptySearches;
}
public String getId() {
@@ -169,6 +173,10 @@ public class DatafeedConfig implements ToXContentObject {
return delayedDataCheckConfig;
}
public Integer getMaxEmptySearches() {
return maxEmptySearches;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -205,6 +213,9 @@ public class DatafeedConfig implements ToXContentObject {
if (delayedDataCheckConfig != null) {
builder.field(DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig);
}
if (maxEmptySearches != null) {
builder.field(MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches);
}
builder.endObject();
return builder;
@@ -245,7 +256,8 @@ public class DatafeedConfig implements ToXContentObject {
&& Objects.equals(asMap(this.aggregations), asMap(that.aggregations))
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig)
-&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig);
+&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig)
+&& Objects.equals(this.maxEmptySearches, that.maxEmptySearches);
}
/**
@@ -256,7 +268,7 @@ public class DatafeedConfig implements ToXContentObject {
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, asMap(query), scrollSize, asMap(aggregations), scriptFields,
-chunkingConfig, delayedDataCheckConfig);
+chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
public static Builder builder(String id, String jobId) {
@@ -276,6 +288,7 @@ public class DatafeedConfig implements ToXContentObject {
private Integer scrollSize;
private ChunkingConfig chunkingConfig;
private DelayedDataCheckConfig delayedDataCheckConfig;
private Integer maxEmptySearches;
public Builder(String id, String jobId) {
this.id = Objects.requireNonNull(id, ID.getPreferredName());
@@ -294,6 +307,7 @@ public class DatafeedConfig implements ToXContentObject {
this.scrollSize = config.scrollSize;
this.chunkingConfig = config.chunkingConfig;
this.delayedDataCheckConfig = config.getDelayedDataCheckConfig();
this.maxEmptySearches = config.getMaxEmptySearches();
}
public Builder setIndices(List<String> indices) {
@@ -376,9 +390,14 @@ public class DatafeedConfig implements ToXContentObject {
return this;
}
public Builder setMaxEmptySearches(int maxEmptySearches) {
this.maxEmptySearches = maxEmptySearches;
return this;
}
public DatafeedConfig build() {
return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, query, aggregations, scriptFields, scrollSize,
-chunkingConfig, delayedDataCheckConfig);
+chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
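
A short sketch of wiring up the new option; the datafeed and job ids and the value are illustrative, not from this commit:

    DatafeedConfig datafeed = DatafeedConfig.builder("my-datafeed", "my-job")
        .setIndices(Collections.singletonList("metrics-*"))
        // Assumed semantics: stop the datafeed once this many consecutive
        // searches come back empty.
        .setMaxEmptySearches(10)
        .build();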
private static BytesReference xContentToBytes(ToXContentObject object) throws IOException {


@@ -79,6 +79,7 @@ public class DatafeedUpdate implements ToXContentObject {
PARSER.declareObject(Builder::setDelayedDataCheckConfig,
DelayedDataCheckConfig.PARSER,
DatafeedConfig.DELAYED_DATA_CHECK_CONFIG);
PARSER.declareInt(Builder::setMaxEmptySearches, DatafeedConfig.MAX_EMPTY_SEARCHES);
}
private static BytesReference parseBytes(XContentParser parser) throws IOException {
@@ -98,10 +99,12 @@ public class DatafeedUpdate implements ToXContentObject {
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private final DelayedDataCheckConfig delayedDataCheckConfig;
private final Integer maxEmptySearches;
private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, BytesReference query,
BytesReference aggregations, List<SearchSourceBuilder.ScriptField> scriptFields, Integer scrollSize,
-ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) {
+ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig,
+Integer maxEmptySearches) {
this.id = id;
this.jobId = jobId;
this.queryDelay = queryDelay;
@@ -113,6 +116,7 @@ public class DatafeedUpdate implements ToXContentObject {
this.scrollSize = scrollSize;
this.chunkingConfig = chunkingConfig;
this.delayedDataCheckConfig = delayedDataCheckConfig;
this.maxEmptySearches = maxEmptySearches;
}
/**
@@ -152,6 +156,7 @@ public class DatafeedUpdate implements ToXContentObject {
}
addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize);
addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig);
addOptionalField(builder, DatafeedConfig.MAX_EMPTY_SEARCHES, maxEmptySearches);
builder.endObject();
return builder;
}
@@ -202,6 +207,10 @@ public class DatafeedUpdate implements ToXContentObject {
return delayedDataCheckConfig;
}
public Integer getMaxEmptySearches() {
return maxEmptySearches;
}
private static Map<String, Object> asMap(BytesReference bytesReference) {
return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2();
}
@@ -237,7 +246,8 @@ public class DatafeedUpdate implements ToXContentObject {
&& Objects.equals(asMap(this.aggregations), asMap(that.aggregations))
&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig)
&& Objects.equals(this.scriptFields, that.scriptFields)
-&& Objects.equals(this.chunkingConfig, that.chunkingConfig);
+&& Objects.equals(this.chunkingConfig, that.chunkingConfig)
+&& Objects.equals(this.maxEmptySearches, that.maxEmptySearches);
}
/**
@@ -248,7 +258,7 @@ public class DatafeedUpdate implements ToXContentObject {
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, asMap(query), scrollSize, asMap(aggregations), scriptFields,
-chunkingConfig, delayedDataCheckConfig);
+chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
public static Builder builder(String id) {
@@ -268,6 +278,7 @@ public class DatafeedUpdate implements ToXContentObject {
private Integer scrollSize;
private ChunkingConfig chunkingConfig;
private DelayedDataCheckConfig delayedDataCheckConfig;
private Integer maxEmptySearches;
public Builder(String id) {
this.id = Objects.requireNonNull(id, DatafeedConfig.ID.getPreferredName());
@@ -285,6 +296,7 @@ public class DatafeedUpdate implements ToXContentObject {
this.scrollSize = config.scrollSize;
this.chunkingConfig = config.chunkingConfig;
this.delayedDataCheckConfig = config.delayedDataCheckConfig;
this.maxEmptySearches = config.maxEmptySearches;
}
@Deprecated
@@ -364,9 +376,14 @@ public class DatafeedUpdate implements ToXContentObject {
return this;
}
public Builder setMaxEmptySearches(int maxEmptySearches) {
this.maxEmptySearches = maxEmptySearches;
return this;
}
public DatafeedUpdate build() {
return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, query, aggregations, scriptFields, scrollSize,
-chunkingConfig, delayedDataCheckConfig);
+chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
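
And the matching update path, a sketch under the same assumptions:

    DatafeedUpdate update = DatafeedUpdate.builder("my-datafeed")
        .setMaxEmptySearches(20)
        .build();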
private static BytesReference xContentToBytes(ToXContentObject object) throws IOException {


@@ -0,0 +1,25 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.slm;
import org.elasticsearch.client.TimedRequest;
public class SnapshotLifecycleManagementStatusRequest extends TimedRequest {
}
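
The request carries no body of its own; the useful knobs come from TimedRequest. A hedged sketch of tuning them, with setter names taken from TimedRequest and values that are purely illustrative:

    SnapshotLifecycleManagementStatusRequest statusRequest =
        new SnapshotLifecycleManagementStatusRequest();
    statusRequest.setTimeout(TimeValue.timeValueSeconds(45));       // request timeout
    statusRequest.setMasterTimeout(TimeValue.timeValueSeconds(45)); // master node timeout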


@@ -0,0 +1,25 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.slm;
import org.elasticsearch.client.TimedRequest;
public class StartSLMRequest extends TimedRequest {
}


@@ -0,0 +1,25 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.slm;
import org.elasticsearch.client.TimedRequest;
public class StopSLMRequest extends TimedRequest {
}


@@ -31,9 +31,11 @@ import org.elasticsearch.client.ccr.FollowInfoRequest;
import org.elasticsearch.client.ccr.FollowStatsRequest;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -143,6 +145,26 @@ public class CcrRequestConvertersTests extends ESTestCase {
assertThat(result.getEntity(), nullValue());
}
public void testPauseAutofollowPattern() throws Exception {
PauseAutoFollowPatternRequest pauseAutoFollowPatternRequest = new PauseAutoFollowPatternRequest(randomAlphaOfLength(4));
Request result = CcrRequestConverters.pauseAutoFollowPattern(pauseAutoFollowPatternRequest);
assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME));
assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + pauseAutoFollowPatternRequest.getName() + "/pause"));
assertThat(result.getParameters().size(), equalTo(0));
assertThat(result.getEntity(), nullValue());
}
public void testResumeAutofollowPattern() throws Exception {
ResumeAutoFollowPatternRequest resumeAutoFollowPatternRequest = new ResumeAutoFollowPatternRequest(randomAlphaOfLength(4));
Request result = CcrRequestConverters.resumeAutoFollowPattern(resumeAutoFollowPatternRequest);
assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME));
assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + resumeAutoFollowPatternRequest.getName() + "/resume"));
assertThat(result.getParameters().size(), equalTo(0));
assertThat(result.getEntity(), nullValue());
}
public void testGetCcrStats() throws Exception {
CcrStatsRequest ccrStatsRequest = new CcrStatsRequest();
Request result = CcrRequestConverters.getCcrStats(ccrStatsRequest);


@@ -44,10 +44,12 @@ import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse.Pattern;
import org.elasticsearch.client.ccr.IndicesFollowStats;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.PutFollowResponse;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
@@ -681,6 +683,124 @@ public class CCRDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testPauseAutoFollowPattern() throws Exception {
final RestHighLevelClient client = highLevelClient();
{
final PutAutoFollowPatternRequest putRequest =
new PutAutoFollowPatternRequest("my_pattern", "local", Collections.singletonList("logs-*"));
AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
assertThat(putResponse.isAcknowledged(), is(true));
}
// tag::ccr-pause-auto-follow-pattern-request
PauseAutoFollowPatternRequest request =
new PauseAutoFollowPatternRequest("my_pattern"); // <1>
// end::ccr-pause-auto-follow-pattern-request
// tag::ccr-pause-auto-follow-pattern-execute
AcknowledgedResponse response = client.ccr()
.pauseAutoFollowPattern(request, RequestOptions.DEFAULT);
// end::ccr-pause-auto-follow-pattern-execute
// tag::ccr-pause-auto-follow-pattern-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::ccr-pause-auto-follow-pattern-response
// tag::ccr-pause-auto-follow-pattern-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) { // <1>
boolean paused = response.isAcknowledged();
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::ccr-pause-auto-follow-pattern-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::ccr-pause-auto-follow-pattern-execute-async
client.ccr().pauseAutoFollowPatternAsync(request,
RequestOptions.DEFAULT, listener); // <1>
// end::ccr-pause-auto-follow-pattern-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
// Cleanup:
{
DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
assertThat(deleteResponse.isAcknowledged(), is(true));
}
}
public void testResumeAutoFollowPattern() throws Exception {
final RestHighLevelClient client = highLevelClient();
{
final PutAutoFollowPatternRequest putRequest =
new PutAutoFollowPatternRequest("my_pattern", "local", Collections.singletonList("logs-*"));
AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
assertThat(putResponse.isAcknowledged(), is(true));
final PauseAutoFollowPatternRequest pauseRequest = new PauseAutoFollowPatternRequest("my_pattern");
AcknowledgedResponse pauseResponse = client.ccr().pauseAutoFollowPattern(pauseRequest, RequestOptions.DEFAULT);
assertThat(pauseResponse.isAcknowledged(), is(true));
}
// tag::ccr-resume-auto-follow-pattern-request
ResumeAutoFollowPatternRequest request =
new ResumeAutoFollowPatternRequest("my_pattern"); // <1>
// end::ccr-resume-auto-follow-pattern-request
// tag::ccr-resume-auto-follow-pattern-execute
AcknowledgedResponse response = client.ccr()
.resumeAutoFollowPattern(request, RequestOptions.DEFAULT);
// end::ccr-resume-auto-follow-pattern-execute
// tag::ccr-resume-auto-follow-pattern-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::ccr-resume-auto-follow-pattern-response
// tag::ccr-resume-auto-follow-pattern-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) { // <1>
boolean resumed = response.isAcknowledged();
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::ccr-resume-auto-follow-pattern-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::ccr-resume-auto-follow-pattern-execute-async
client.ccr().resumeAutoFollowPatternAsync(request,
RequestOptions.DEFAULT, listener); // <1>
// end::ccr-resume-auto-follow-pattern-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
// Cleanup:
{
DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
assertThat(deleteResponse.isAcknowledged(), is(true));
}
}
public void testGetCCRStats() throws Exception {
RestHighLevelClient client = highLevelClient();


@@ -64,10 +64,13 @@ import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse;
import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.SnapshotInvocationRecord;
import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest;
import org.elasticsearch.client.slm.SnapshotLifecyclePolicy;
import org.elasticsearch.client.slm.SnapshotLifecyclePolicyMetadata;
import org.elasticsearch.client.slm.SnapshotLifecycleStats;
import org.elasticsearch.client.slm.SnapshotRetentionConfiguration;
import org.elasticsearch.client.slm.StartSLMRequest;
import org.elasticsearch.client.slm.StopSLMRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -460,7 +463,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
-public void testStartStopStatus() throws Exception {
+public void testILMStartStopStatus() throws Exception {
RestHighLevelClient client = highLevelClient();
stopILM(client);
@@ -776,7 +779,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(response.isAcknowledged());
//////// PUT
-// tag::slm-put-snapshot-lifecycle-policy
+// tag::slm-put-snapshot-lifecycle-policy-request
Map<String, Object> config = new HashMap<>();
config.put("indices", Collections.singletonList("idx"));
SnapshotRetentionConfiguration retention =
@@ -786,7 +789,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
"my_repository", config, retention);
PutSnapshotLifecyclePolicyRequest request =
new PutSnapshotLifecyclePolicyRequest(policy);
-// end::slm-put-snapshot-lifecycle-policy
+// end::slm-put-snapshot-lifecycle-policy-request
// tag::slm-put-snapshot-lifecycle-policy-execute
AcknowledgedResponse resp = client.indexLifecycle()
@@ -815,16 +818,16 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::slm-put-snapshot-lifecycle-policy-execute-async
client.indexLifecycle().putSnapshotLifecyclePolicyAsync(request,
-RequestOptions.DEFAULT, putListener);
+RequestOptions.DEFAULT, putListener); // <1>
// end::slm-put-snapshot-lifecycle-policy-execute-async
//////// GET
-// tag::slm-get-snapshot-lifecycle-policy
+// tag::slm-get-snapshot-lifecycle-policy-request
GetSnapshotLifecyclePolicyRequest getAllRequest =
new GetSnapshotLifecyclePolicyRequest(); // <1>
GetSnapshotLifecyclePolicyRequest getRequest =
new GetSnapshotLifecyclePolicyRequest("policy_id"); // <2>
-// end::slm-get-snapshot-lifecycle-policy
+// end::slm-get-snapshot-lifecycle-policy-request
// tag::slm-get-snapshot-lifecycle-policy-execute
GetSnapshotLifecyclePolicyResponse getResponse =
@@ -851,7 +854,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::slm-get-snapshot-lifecycle-policy-execute-async
client.indexLifecycle().getSnapshotLifecyclePolicyAsync(getRequest,
-RequestOptions.DEFAULT, getListener);
+RequestOptions.DEFAULT, getListener); // <1>
// end::slm-get-snapshot-lifecycle-policy-execute-async
assertThat(getResponse.getPolicies().size(), equalTo(1));
@@ -879,10 +882,10 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build());
//////// EXECUTE
-// tag::slm-execute-snapshot-lifecycle-policy
+// tag::slm-execute-snapshot-lifecycle-policy-request
ExecuteSnapshotLifecyclePolicyRequest executeRequest =
new ExecuteSnapshotLifecyclePolicyRequest("policy_id"); // <1>
-// end::slm-execute-snapshot-lifecycle-policy
+// end::slm-execute-snapshot-lifecycle-policy-request
// tag::slm-execute-snapshot-lifecycle-policy-execute
ExecuteSnapshotLifecyclePolicyResponse executeResponse =
@@ -937,7 +940,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::slm-execute-snapshot-lifecycle-policy-execute-async
client.indexLifecycle()
.executeSnapshotLifecyclePolicyAsync(executeRequest,
-RequestOptions.DEFAULT, executeListener);
+RequestOptions.DEFAULT, executeListener); // <1>
// end::slm-execute-snapshot-lifecycle-policy-execute-async
latch.await(5, TimeUnit.SECONDS);
@@ -958,42 +961,50 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
greaterThanOrEqualTo(1L));
//////// DELETE
-// tag::slm-delete-snapshot-lifecycle-policy
+// tag::slm-delete-snapshot-lifecycle-policy-request
DeleteSnapshotLifecyclePolicyRequest deleteRequest =
new DeleteSnapshotLifecyclePolicyRequest("policy_id"); // <1>
-// end::slm-delete-snapshot-lifecycle-policy
+// end::slm-delete-snapshot-lifecycle-policy-request
// tag::slm-delete-snapshot-lifecycle-policy-execute
AcknowledgedResponse deleteResp = client.indexLifecycle()
.deleteSnapshotLifecyclePolicy(deleteRequest, RequestOptions.DEFAULT);
// end::slm-delete-snapshot-lifecycle-policy-execute
// tag::slm-delete-snapshot-lifecycle-policy-response
boolean deleteAcknowledged = deleteResp.isAcknowledged(); // <1>
// end::slm-delete-snapshot-lifecycle-policy-response
assertTrue(deleteResp.isAcknowledged());
-ActionListener<AcknowledgedResponse> deleteListener = new ActionListener<AcknowledgedResponse>() {
-@Override
-public void onResponse(AcknowledgedResponse resp) {
-// no-op
-}
+// tag::slm-delete-snapshot-lifecycle-policy-execute-listener
+ActionListener<AcknowledgedResponse> deleteListener =
+new ActionListener<AcknowledgedResponse>() {
+@Override
+public void onResponse(AcknowledgedResponse resp) {
+boolean deleteAcknowledged = resp.isAcknowledged(); // <1>
+}
-@Override
-public void onFailure(Exception e) {
-// no-op
-}
-};
+@Override
+public void onFailure(Exception e) {
+// <2>
+}
+};
+// end::slm-delete-snapshot-lifecycle-policy-execute-listener
// tag::slm-delete-snapshot-lifecycle-policy-execute-async
client.indexLifecycle()
.deleteSnapshotLifecyclePolicyAsync(deleteRequest,
-RequestOptions.DEFAULT, deleteListener);
+RequestOptions.DEFAULT, deleteListener); // <1>
// end::slm-delete-snapshot-lifecycle-policy-execute-async
assertTrue(deleteResp.isAcknowledged());
//////// EXECUTE RETENTION
-// tag::slm-execute-snapshot-lifecycle-retention
+// tag::slm-execute-snapshot-lifecycle-retention-request
ExecuteSnapshotLifecycleRetentionRequest req =
new ExecuteSnapshotLifecycleRetentionRequest();
-// end::slm-execute-snapshot-lifecycle-retention
+// end::slm-execute-snapshot-lifecycle-retention-request
// tag::slm-execute-snapshot-lifecycle-retention-execute
AcknowledgedResponse retentionResp =
@@ -1006,7 +1017,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
final boolean acked = retentionResp.isAcknowledged();
// end::slm-execute-snapshot-lifecycle-retention-response
-// tag::slm-execute-snapshot-lifecycle-policy-execute-listener
+// tag::slm-execute-snapshot-lifecycle-retention-execute-listener
ActionListener<AcknowledgedResponse> retentionListener =
new ActionListener<AcknowledgedResponse>() {
@Override
@@ -1024,7 +1035,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::slm-execute-snapshot-lifecycle-retention-execute-async
client.indexLifecycle()
.executeSnapshotLifecycleRetentionAsync(req,
-RequestOptions.DEFAULT, retentionListener);
+RequestOptions.DEFAULT, retentionListener); // <1>
// end::slm-execute-snapshot-lifecycle-retention-execute-async
}
@@ -1051,6 +1062,152 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
});
}
public void testSLMStartStopStatus() throws Exception {
RestHighLevelClient client = highLevelClient();
stopSLM(client);
// tag::slm-status-request
SnapshotLifecycleManagementStatusRequest request =
new SnapshotLifecycleManagementStatusRequest();
// end::slm-status-request
// Check that SLM has stopped
{
// tag::slm-status-execute
LifecycleManagementStatusResponse response =
client.indexLifecycle()
.getSLMStatus(request, RequestOptions.DEFAULT);
// end::slm-status-execute
// tag::slm-status-response
OperationMode operationMode = response.getOperationMode(); // <1>
// end::slm-status-response
assertThat(operationMode, Matchers.either(equalTo(OperationMode.STOPPING)).or(equalTo(OperationMode.STOPPED)));
}
startSLM(client);
// tag::slm-status-execute-listener
ActionListener<LifecycleManagementStatusResponse> listener =
new ActionListener<LifecycleManagementStatusResponse>() {
@Override
public void onResponse(
LifecycleManagementStatusResponse response) {
OperationMode operationMode = response
.getOperationMode(); // <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::slm-status-execute-listener
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::slm-status-execute-async
client.indexLifecycle().getSLMStatusAsync(request,
RequestOptions.DEFAULT, listener); // <1>
// end::slm-status-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
// Check that SLM is running again
LifecycleManagementStatusResponse response =
client.indexLifecycle()
.getSLMStatus(request, RequestOptions.DEFAULT);
OperationMode operationMode = response.getOperationMode();
assertEquals(OperationMode.RUNNING, operationMode);
}
private void stopSLM(RestHighLevelClient client) throws IOException, InterruptedException {
// tag::slm-stop-slm-request
StopSLMRequest request = new StopSLMRequest();
// end::slm-stop-slm-request
// tag::slm-stop-slm-execute
AcknowledgedResponse response = client.indexLifecycle()
.stopSLM(request, RequestOptions.DEFAULT);
// end::slm-stop-slm-execute
// tag::slm-stop-slm-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::slm-stop-slm-response
assertTrue(acknowledged);
// tag::slm-stop-slm-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) {
boolean acknowledged = response.isAcknowledged(); // <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::slm-stop-slm-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::slm-stop-slm-execute-async
client.indexLifecycle().stopSLMAsync(request,
RequestOptions.DEFAULT, listener); // <1>
// end::slm-stop-slm-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
private void startSLM(RestHighLevelClient client) throws IOException, InterruptedException {
// tag::slm-start-slm-request
StartSLMRequest request1 = new StartSLMRequest();
// end::slm-start-slm-request
// tag::slm-start-slm-execute
AcknowledgedResponse response = client.indexLifecycle()
.startSLM(request1, RequestOptions.DEFAULT);
// end::slm-start-slm-execute
// tag::slm-start-slm-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::slm-start-slm-response
assertTrue(acknowledged);
// tag::slm-start-slm-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) {
boolean acknowledged = response.isAcknowledged(); // <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::slm-start-slm-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::slm-start-slm-execute-async
client.indexLifecycle().startSLMAsync(request1,
RequestOptions.DEFAULT, listener); // <1>
// end::slm-start-slm-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
static Map<String, Object> toMap(Response response) throws IOException {
return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false);
}


@@ -106,6 +106,9 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
if (randomBoolean()) {
builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig());
}
if (randomBoolean()) {
builder.setMaxEmptySearches(randomIntBetween(10, 100));
}
return builder;
}


@@ -83,6 +83,9 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate
if (randomBoolean()) {
builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig());
}
if (randomBoolean()) {
builder.setMaxEmptySearches(randomIntBetween(10, 100));
}
return builder.build();
}


@@ -56,6 +56,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -73,6 +74,7 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Collections.singletonList;
/**
@@ -119,6 +121,34 @@ public class RestClient implements Closeable {
setNodes(nodes);
}
/**
* Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
* Creates a new builder instance and sets the nodes that the client will send requests to.
*
* @param cloudId a valid elastic cloud cloudId that will route to a cluster. The cloudId is located in
* the user console https://cloud.elastic.co and will resemble a string like the following
* optionalHumanReadableName:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRlbGFzdGljc2VhcmNoJGtpYmFuYQ==
*/
public static RestClientBuilder builder(String cloudId) {
// there is an optional first portion of the cloudId that is a human readable string, but it is not used.
if (cloudId.contains(":")) {
if (cloudId.indexOf(":") == cloudId.length() - 1) {
throw new IllegalStateException("cloudId " + cloudId + " must begin with a human readable identifier followed by a colon");
}
cloudId = cloudId.substring(cloudId.indexOf(":") + 1);
}
String decoded = new String(Base64.getDecoder().decode(cloudId), UTF_8);
// once decoded the parts are separated by a $ character
String[] decodedParts = decoded.split("\\$");
if (decodedParts.length != 3) {
throw new IllegalStateException("cloudId " + cloudId + " did not decode to a cluster identifier correctly");
}
String url = decodedParts[1] + "." + decodedParts[0];
return builder(new HttpHost(url, 443, "https"));
}
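
A worked example of the decoding, mirroring the builder test below; the deployment name and cloud values are illustrative:

    // The optional "myDeployment:" prefix is stripped. The Base64 payload decodes to
    // "us-east-1.aws.found.io$elasticsearch$kibana": host suffix, Elasticsearch cluster
    // id, and Kibana id. The resulting node is https://elasticsearch.us-east-1.aws.found.io:443.
    String cloudId = "myDeployment:" + Base64.getEncoder().encodeToString(
        "us-east-1.aws.found.io$elasticsearch$kibana".getBytes(StandardCharsets.UTF_8));
    RestClientBuilder builder = RestClient.builder(cloudId);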
/**
* Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
* Creates a new builder instance and sets the hosts that the client will send requests to.


@@ -26,8 +26,10 @@ import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.message.BasicHeader;
import java.io.IOException;
import java.util.Base64;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
@@ -159,6 +161,38 @@ public class RestClientBuilderTests extends RestClientTestCase {
}
}
public void testBuildCloudId() throws IOException {
String host = "us-east-1.aws.found.io";
String esId = "elasticsearch";
String kibanaId = "kibana";
String toEncode = host + "$" + esId + "$" + kibanaId;
String encodedId = Base64.getEncoder().encodeToString(toEncode.getBytes(UTF8));
assertNotNull(RestClient.builder(encodedId));
assertNotNull(RestClient.builder("humanReadable:" + encodedId));
String badId = Base64.getEncoder().encodeToString("foo$bar".getBytes(UTF8));
try {
RestClient.builder(badId);
fail("should have failed");
} catch (IllegalStateException e) {
assertEquals("cloudId " + badId + " did not decode to a cluster identifier correctly", e.getMessage());
}
try {
RestClient.builder(badId + ":");
fail("should have failed");
} catch (IllegalStateException e) {
assertEquals("cloudId " + badId + ":" + " must begin with a human readable identifier followed by a colon", e.getMessage());
}
RestClient client = RestClient.builder(encodedId).build();
assertThat(client.getNodes().size(), equalTo(1));
assertThat(client.getNodes().get(0).getHost().getHostName(), equalTo(esId + "." + host));
assertThat(client.getNodes().get(0).getHost().getPort(), equalTo(443));
assertThat(client.getNodes().get(0).getHost().getSchemeName(), equalTo("https"));
client.close();
}
public void testSetPathPrefixNull() {
try {
RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(null);


@@ -0,0 +1,32 @@
--
:api: ccr-pause-auto-follow-pattern
:request: PauseAutoFollowPatternRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Pause Auto Follow Pattern API
[id="{upid}-{api}-request"]
==== Request
The Pause Auto Follow Pattern API allows you to pause an existing auto follow pattern.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the auto follow pattern.
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates if the pause auto follow pattern request was received.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the pause auto follow pattern request was acknowledged.
include::../execution.asciidoc[]


@@ -0,0 +1,33 @@
--
:api: ccr-resume-auto-follow-pattern
:request: ResumeAutoFollowPatternRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Resume Auto Follow Pattern API
[id="{upid}-{api}-request"]
==== Request
The Resume Auto Follow Pattern API allows you to resume the activity
of a paused auto follow pattern.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the auto follow pattern.
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates if the resume auto follow pattern request was received.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the resume auto follow pattern request was acknowledged.
include::../execution.asciidoc[]


@@ -0,0 +1,36 @@
--
:api: slm-status
:request: SnapshotLifecycleManagementStatusRequest
:response: LifecycleManagementStatusResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Snapshot Lifecycle Management Status API
[id="{upid}-{api}-request"]
==== Request
The Snapshot Lifecycle Management Status API allows you to retrieve the status
of Snapshot Lifecycle Management.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates the status of Snapshot Lifecycle Management.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The returned status can be `RUNNING`, `STOPPING`, or `STOPPED`.
include::../execution.asciidoc[]


@@ -0,0 +1,36 @@
--
:api: slm-start-slm
:request: StartSLMRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Start Snapshot Lifecycle Management API
[id="{upid}-{api}-request"]
==== Request
The Start Snapshot Lifecycle Management API allows you to start Snapshot
Lifecycle Management if it has previously been stopped.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates if the request to start Snapshot Lifecycle
Management was received.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to start Snapshot Lifecycle Management was
acknowledged.
include::../execution.asciidoc[]


@@ -0,0 +1,38 @@
--
:api: slm-stop-slm
:request: StopSLMRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Stop Snapshot Lifecycle Management API
[id="{upid}-{api}-request"]
==== Request
The Stop Snapshot Lifecycle Management API allows you to stop Snapshot
Lifecycle Management temporarily.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates if the request to stop Snapshot
Lifecycle Management was received.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to stop Snapshot Lifecycle Management was
acknowledged.
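As a rough sketch (assuming the stop call is exposed on the index lifecycle
client as `stopSLM` and that a `RestHighLevelClient` named `client` already
exists):

["source","java"]
--------------------------------------------------
// Sketch only: the method name and client wiring are assumptions.
StopSLMRequest request = new StopSLMRequest();
AcknowledgedResponse response =
    client.indexLifecycle().stopSLM(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------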
include::../execution.asciidoc[]

View File

@ -529,6 +529,8 @@ The Java High Level REST Client supports the following CCR APIs:
* <<{upid}-ccr-put-auto-follow-pattern>>
* <<{upid}-ccr-delete-auto-follow-pattern>>
* <<{upid}-ccr-get-auto-follow-pattern>>
* <<{upid}-ccr-pause-auto-follow-pattern>>
* <<{upid}-ccr-resume-auto-follow-pattern>>
* <<{upid}-ccr-get-stats>>
* <<{upid}-ccr-get-follow-stats>>
* <<{upid}-ccr-get-follow-info>>
@ -541,6 +543,8 @@ include::ccr/forget_follower.asciidoc[]
include::ccr/put_auto_follow_pattern.asciidoc[]
include::ccr/delete_auto_follow_pattern.asciidoc[]
include::ccr/get_auto_follow_pattern.asciidoc[]
include::ccr/pause_auto_follow_pattern.asciidoc[]
include::ccr/resume_auto_follow_pattern.asciidoc[]
include::ccr/get_stats.asciidoc[]
include::ccr/get_follow_stats.asciidoc[]
include::ccr/get_follow_info.asciidoc[]
@ -575,6 +579,35 @@ include::ilm/lifecycle_management_status.asciidoc[]
include::ilm/retry_lifecycle_policy.asciidoc[]
include::ilm/remove_lifecycle_policy_from_index.asciidoc[]
[role="xpack"]
== Snapshot Lifecycle Management APIs
:upid: {mainid}-ilm
:doc-tests-file: {doc-tests}/ILMDocumentationIT.java
The Java High Level REST Client supports the following Snapshot Lifecycle
Management APIs:
* <<{upid}-slm-put-snapshot-lifecycle-policy>>
* <<{upid}-slm-delete-snapshot-lifecycle-policy>>
* <<{upid}-slm-get-snapshot-lifecycle-policy>>
* <<{upid}-slm-start-slm>>
* <<{upid}-slm-stop-slm>>
* <<{upid}-slm-status>>
* <<{upid}-slm-execute-snapshot-lifecycle-policy>>
* <<{upid}-slm-execute-snapshot-lifecycle-retention>>
include::ilm/put_snapshot_lifecycle_policy.asciidoc[]
include::ilm/delete_snapshot_lifecycle_policy.asciidoc[]
include::ilm/get_snapshot_lifecycle_policy.asciidoc[]
include::ilm/start_snapshot_lifecycle_management.asciidoc[]
include::ilm/stop_snapshot_lifecycle_management.asciidoc[]
include::ilm/snapshot_lifecycle_management_status.asciidoc[]
include::ilm/execute_snapshot_lifecycle_policy.asciidoc[]
include::ilm/execute_snapshot_lifecycle_retention.asciidoc[]
[role="xpack"]
[[transform_apis]]
== {transform-cap} APIs

View File

@ -34,8 +34,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]

View File

@ -34,10 +34,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]

View File

@ -31,10 +31,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=bytes]
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]

View File

@ -45,10 +45,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=time]

View File

@ -50,10 +50,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=index-query-parm]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=time]

View File

@ -95,8 +95,6 @@ Reason for any snapshot failures.
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
`ignore_unavailable`::
(Optional, boolean) If `true`, the response does not include information from
unavailable snapshots. Defaults to `false`.

View File

@ -51,8 +51,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=node-id-query-parm]
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
include::{docdir}/rest-api/common-parms.asciidoc[tag=parent-task-id]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]

View File

@ -15,10 +15,9 @@ SLM policy management is split into three different CRUD APIs, a way to put or u
policies, a way to retrieve policies, and a way to delete unwanted policies, as
well as a separate API for immediately invoking a snapshot based on a policy.
Since SLM falls under the same category as ILM, it is stopped and started by
using the <<start-stop-ilm,start and stop>> ILM APIs. It is, however, managed
by a different enable setting. To disable SLM's functionality, set the cluster
setting `xpack.slm.enabled` to `false` in elasticsearch.yml.
SLM can be stopped temporarily and restarted using the <<slm-stop,Stop SLM>> and
<<slm-start,Start SLM>> APIs. To disable SLM's functionality entirely, set the
cluster setting `xpack.slm.enabled` to `false` in elasticsearch.yml.
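That is, in `elasticsearch.yml`:

[source,yaml]
--------------------------------------------------
xpack.slm.enabled: false
--------------------------------------------------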
[[slm-api-put]]
=== Put snapshot lifecycle policy API
@ -317,21 +316,42 @@ GET /_slm/policy
[[slm-api-execute]]
=== Execute Snapshot Lifecycle Policy API
=== Execute snapshot lifecycle policy API
++++
<titleabbrev>Execute snapshot lifecycle policy</titleabbrev>
++++
Executes a snapshot lifecycle policy, immediately creating a snapshot
without waiting for the scheduled creation time.
[[slm-api-execute-request]]
==== {api-request-title}
`PUT /_slm/policy/<snapshot-lifecycle-policy-id>/_execute`
[[slm-api-execute-desc]]
==== {api-description-title}
Sometimes it can be useful to immediately execute a snapshot based on a policy,
perhaps before an upgrade or before performing other maintenance on indices. The
execute snapshot policy API allows you to take a snapshot immediately without
waiting for a policy's scheduled invocation.
==== Path Parameters
`policy_id` (required)::
(string) Id of the policy to execute
[[slm-api-execute-path-params]]
==== {api-path-parms-title}
==== Example
`<snapshot-lifecycle-policy-id>`::
(Required, string)
ID of the snapshot lifecycle policy to execute.
To take an immediate snapshot using a policy, use the following
[[slm-api-execute-example]]
==== {api-examples-title}
To take an immediate snapshot using a policy, use the following request:
[source,console]
--------------------------------------------------
@ -339,7 +359,7 @@ POST /_slm/policy/daily-snapshots/_execute
--------------------------------------------------
// TEST[skip:we can't easily handle snapshots from docs tests]
This API will immediately return with the generated snapshot name
This API returns the following response with the generated snapshot name:
[source,console-result]
--------------------------------------------------
@ -450,8 +470,7 @@ POST /_slm/policy/daily-snapshots/_execute
--------------------------------------------------
// TESTRESPONSE[skip:we can't handle snapshots in docs tests]
Now retriving the policy shows that the policy has successfully been executed:
Now retrieving the policy shows that the policy has successfully been executed:
[source,console]
--------------------------------------------------
@ -514,12 +533,22 @@ Which now includes the successful snapshot information:
It is a good idea to test policies using the execute API to ensure they work.
[[slm-get-stats]]
=== Get Snapshot Lifecycle Stats API
=== Get snapshot lifecycle stats API
++++
<titleabbrev>Get snapshot lifecycle stats</titleabbrev>
++++
SLM stores statistics on a global and per-policy level about actions taken. These stats can be
retrieved by using the following API:
Returns global and policy-level statistics about actions taken by {slm}.
==== Example
[[slm-api-stats-request]]
==== {api-request-title}
`GET /_slm/stats`
[[slm-api-stats-example]]
==== {api-examples-title}
[source,console]
--------------------------------------------------
@ -527,7 +556,7 @@ GET /_slm/stats
--------------------------------------------------
// TEST[continued]
Which returns a response similar to:
The API returns the following response:
[source,js]
--------------------------------------------------
@ -546,19 +575,40 @@ Which returns a response similar to:
--------------------------------------------------
// TESTRESPONSE[s/runs": 13/runs": $body.retention_runs/ s/_failed": 0/_failed": $body.retention_failed/ s/_timed_out": 0/_timed_out": $body.retention_timed_out/ s/"1.4s"/$body.retention_deletion_time/ s/1404/$body.retention_deletion_time_millis/ s/total_snapshots_taken": 1/total_snapshots_taken": $body.total_snapshots_taken/ s/total_snapshots_failed": 1/total_snapshots_failed": $body.total_snapshots_failed/ s/"policy_stats": [.*]/"policy_stats": $body.policy_stats/]
[[slm-api-delete]]
=== Delete Snapshot Lifecycle Policy API
=== Delete snapshot lifecycle policy API
++++
<titleabbrev>Delete snapshot lifecycle policy</titleabbrev>
++++
Deletes an existing snapshot lifecycle policy.
[[slm-api-delete-request]]
==== {api-request-title}
`DELETE /_slm/policy/<snapshot-lifecycle-policy-id>`
[[slm-api-delete-desc]]
==== {api-description-title}
A policy can be deleted by issuing a delete request with the policy id. Note
that this prevents any future snapshots from being taken, but does not cancel
any currently ongoing snapshots or remove any previously taken snapshots.
==== Path Parameters
`policy_id` (optional)::
(string) Id of the policy to remove.
[[slm-api-delete-path-params]]
==== {api-path-parms-title}
==== Example
`<snapshot-lifecycle-policy-id>`::
(Required, string)
ID of the snapshot lifecycle policy to delete.
[[slm-api-delete-example]]
==== {api-examples-title}
[source,console]
--------------------------------------------------
@ -566,23 +616,42 @@ DELETE /_slm/policy/daily-snapshots
--------------------------------------------------
// TEST[continued]
[[slm-api-execute-retention]]
=== Execute Snapshot Lifecycle Retention API
=== Execute snapshot lifecycle retention API
++++
<titleabbrev>Execute snapshot lifecycle retention</titleabbrev>
++++
Deletes any expired snapshots based on lifecycle policy retention rules.
[[slm-api-execute-retention-request]]
==== {api-request-title}
`POST /_slm/_execute_retention`
[[slm-api-execute-retention-desc]]
==== {api-description-title}
While Snapshot Lifecycle Management retention is usually invoked through the global cluster settings
for its schedule, it can sometimes be useful to invoke a retention run to expunge expired snapshots
immediately. This API allows you to trigger a one-off retention run.
==== Example
To immediately start snapshot retention, use the following
[[slm-api-execute-retention-example]]
==== {api-examples-title}
To immediately start snapshot retention, use the following request:
[source,console]
--------------------------------------------------
POST /_slm/_execute_retention
--------------------------------------------------
This API will immediately return, as retention will be run asynchronously in the background:
This API returns the following response as retention runs asynchronously in the
background:
[source,console-result]
--------------------------------------------------
@ -591,3 +660,163 @@ This API will immediately return, as retention will be run asynchronously in the
}
--------------------------------------------------
[[slm-stop]]
=== Stop Snapshot Lifecycle Management API
[subs="attributes"]
++++
<titleabbrev>Stop Snapshot Lifecycle Management</titleabbrev>
++++
Stop the Snapshot Lifecycle Management (SLM) plugin.
[[slm-stop-request]]
==== {api-request-title}
`POST /_slm/stop`
[[slm-stop-desc]]
==== {api-description-title}
Halts all snapshot lifecycle management operations and stops the SLM plugin.
This is useful when you are performing maintenance on the cluster and need to
prevent SLM from performing any actions on your indices. Note that this API does
not stop any snapshots that are currently in progress, and that snapshots can
still be taken manually via the <<slm-api-execute,Execute Policy API>> even
when SLM is stopped.
The API returns as soon as the stop request has been acknowledged, but the
plugin might continue to run until in-progress operations complete and it
can be safely stopped. Use the <<slm-get-status, Get SLM Status>> API to see
if SLM is running.
==== Request Parameters
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_slm` cluster privilege to use this API.
For more information, see <<security-privileges>>.
[[slm-stop-example]]
==== {api-examples-title}
Stops the SLM plugin.
[source,console]
--------------------------------------------------
POST _slm/stop
--------------------------------------------------
// TEST[continued]
If the request does not encounter errors, you receive the following result:
[source,console-result]
--------------------------------------------------
{
"acknowledged": true
}
--------------------------------------------------
[[slm-start]]
=== Start Snapshot Lifecycle Management API
[subs="attributes"]
++++
<titleabbrev>Start Snapshot Lifecycle Management</titleabbrev>
++++
Start the Snapshot Lifecycle Management (SLM) plugin.
[[slm-start-request]]
==== {api-request-title}
`POST /_slm/start`
[[slm-start-desc]]
==== {api-description-title}
Starts the SLM plugin if it is currently stopped. SLM is started
automatically when the cluster is formed. Restarting SLM is only
necessary if it has been stopped using the <<slm-stop, Stop SLM API>>.
==== Request Parameters
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_slm` cluster privilege to use this API.
For more information, see <<security-privileges>>.
[[slm-start-example]]
==== {api-examples-title}
Starts the SLM plugin.
[source,console]
--------------------------------------------------
POST _slm/start
--------------------------------------------------
// TEST[continued]
If the request succeeds, you receive the following result:
[source,console-result]
--------------------------------------------------
{
"acknowledged": true
}
--------------------------------------------------
[[slm-get-status]]
=== Get Snapshot Lifecycle Management status API
[subs="attributes"]
++++
<titleabbrev>Get Snapshot Lifecycle Management status</titleabbrev>
++++
Retrieves the current Snapshot Lifecycle Management (SLM) status.
[[slm-get-status-request]]
==== {api-request-title}
`GET /_slm/status`
[[slm-get-status-desc]]
==== {api-description-title}
Returns the status of the SLM plugin. The `operation_mode` field in the
response shows one of three states: `RUNNING`, `STOPPING`,
or `STOPPED`. You can change the status of the SLM plugin with the
<<slm-start, Start SLM>> and <<slm-stop, Stop SLM>> APIs.
==== Request Parameters
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_slm` or `read_slm` cluster privilege to use this API.
For more information, see <<security-privileges>>.
[[slm-get-status-example]]
==== {api-examples-title}
Gets the SLM plugin status.
[source,console]
--------------------------------------------------
GET _slm/status
--------------------------------------------------
If the request succeeds, the body of the response shows the operation mode:
[source,console-result]
--------------------------------------------------
{
"operation_mode": "RUNNING"
}
--------------------------------------------------

View File

@ -65,6 +65,15 @@ A {dfeed} resource has the following properties:
`{"enabled": true, "check_window": "1h"}` See
<<ml-datafeed-delayed-data-check-config>>.
`max_empty_searches`::
(integer) If a real-time {dfeed} has never seen any data (including during
any initial training period), it automatically stops itself and closes its
associated job after this many real-time searches that return no
documents. In other words, it stops after `frequency` times
`max_empty_searches` of real-time operation. If not set,
a {dfeed} with no end time that sees no data remains started until
it is explicitly stopped. By default this setting is not set (see the example below).
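For example, a hypothetical datafeed with `frequency` of `10m` and
`max_empty_searches` of `6` stops itself after roughly one hour of real-time
operation in which every search returns no documents (the job and index names
below are placeholders):

[source,console]
--------------------------------------------------
PUT _ml/datafeeds/datafeed-example
{
  "job_id": "example-job",
  "indices": ["example-index"],
  "frequency": "10m",
  "max_empty_searches": 6
}
--------------------------------------------------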
[[ml-datafeed-chunking-config]]
==== Chunking configuration objects

View File

@ -101,6 +101,15 @@ parallel and close one when you are satisfied with the results of the other job.
(Optional, unsigned integer) The `size` parameter that is used in {es}
searches. The default value is `1000`.
`max_empty_searches`::
(Optional, integer) If a real-time {dfeed} has never seen any data (including
during any initial training period), it automatically stops itself
and closes its associated job after this many real-time searches that return
no documents. In other words, it stops after `frequency` times
`max_empty_searches` of real-time operation. If not set,
a {dfeed} with no end time that sees no data remains started until
it is explicitly stopped. The special value `-1` unsets this setting, as in
the example below.
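For example, a hypothetical update request that unsets a previously configured
`max_empty_searches` (the datafeed ID is a placeholder):

[source,console]
--------------------------------------------------
POST _ml/datafeeds/datafeed-example/_update
{
  "max_empty_searches": -1
}
--------------------------------------------------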
For more information about these properties, see <<ml-datafeed-resource>>.

View File

@ -55,9 +55,7 @@ public final class Grok {
"(?::(?<subname>[[:alnum:]@\\[\\]_:.-]+))?" +
")" +
"(?:=(?<definition>" +
"(?:" +
"(?:[^{}]+|\\.+)+" +
")+" +
")" +
")?" + "\\}";
private static final Regex GROK_PATTERN_REGEX = new Regex(GROK_PATTERN.getBytes(StandardCharsets.UTF_8), 0,

View File

@ -1 +0,0 @@
d16cf15d29c409987cecde77407fbb6f1e16d262

View File

@ -0,0 +1 @@
6e6fc9178d1f1401aa0d6b843341efb91720f2cd

View File

@ -1 +0,0 @@
ccfbdfc727cbf702350572a0b12fe92185ebf162

View File

@ -0,0 +1 @@
b1d5ed85a558fbbadc2783f869fbd0adcd32b07b

View File

@ -1 +0,0 @@
4d55b3cdb74cd140d262de96987ebd369125a64c

View File

@ -0,0 +1 @@
5f71267aa784d0e6c5ec09fb988339d244b205a0

View File

@ -1 +0,0 @@
6f8aae763f743d91fb1ba1e9011dae0ef4f6ff34

View File

@ -0,0 +1 @@
e02700b574d3a0e2100308f971f0753ac8700e7c

View File

@ -1 +0,0 @@
ebf1f2bd0dad5e16aa1fc48d32e5dbe507b38d53

View File

@ -0,0 +1 @@
fc6546be5df552d9729f008d8d41a6dee28127aa

View File

@ -1 +0,0 @@
b00be4aa309e9b56e498191aa8c73e4f393759ed

View File

@ -0,0 +1 @@
ccaacf418a9e486b65e82c47bed66439119c5fdb

View File

@ -1 +0,0 @@
cd8b612d5daa42d1be3bb3203e4857597d5db79b

View File

@ -0,0 +1 @@
857502e863c02c829fdafea61c3fda6bda01d0af

View File

@ -1 +0,0 @@
d16cf15d29c409987cecde77407fbb6f1e16d262

View File

@ -0,0 +1 @@
6e6fc9178d1f1401aa0d6b843341efb91720f2cd

View File

@ -1 +0,0 @@
ccfbdfc727cbf702350572a0b12fe92185ebf162

View File

@ -0,0 +1 @@
b1d5ed85a558fbbadc2783f869fbd0adcd32b07b

View File

@ -1 +0,0 @@
4d55b3cdb74cd140d262de96987ebd369125a64c

View File

@ -0,0 +1 @@
5f71267aa784d0e6c5ec09fb988339d244b205a0

View File

@ -1 +0,0 @@
6f8aae763f743d91fb1ba1e9011dae0ef4f6ff34

View File

@ -0,0 +1 @@
e02700b574d3a0e2100308f971f0753ac8700e7c

View File

@ -1 +0,0 @@
ebf1f2bd0dad5e16aa1fc48d32e5dbe507b38d53

View File

@ -0,0 +1 @@
fc6546be5df552d9729f008d8d41a6dee28127aa

View File

@ -1 +0,0 @@
b00be4aa309e9b56e498191aa8c73e4f393759ed

View File

@ -0,0 +1 @@
ccaacf418a9e486b65e82c47bed66439119c5fdb

View File

@ -1 +0,0 @@
cd8b612d5daa42d1be3bb3203e4857597d5db79b

View File

@ -0,0 +1 @@
857502e863c02c829fdafea61c3fda6bda01d0af

View File

@ -36,10 +36,6 @@
"type":"boolean",
"description":"Return local information, do not retrieve the state from master node (default: false)"
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View File

@ -32,14 +32,6 @@
"type":"string",
"description":"a short version of the Accept header, e.g. json, yaml"
},
"local":{
"type":"boolean",
"description":"Return local information, do not retrieve the state from master node (default: false)"
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View File

@ -49,14 +49,6 @@
"pb"
]
},
"local":{
"type":"boolean",
"description":"Return local information, do not retrieve the state from master node (default: false)"
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View File

@ -20,14 +20,6 @@
"type":"string",
"description":"a short version of the Accept header, e.g. json, yaml"
},
"local":{
"type":"boolean",
"description":"Return local information, do not retrieve the state from master node (default: false)"
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View File

@ -59,10 +59,6 @@
"description":"If `true`, the response includes detailed information about shard recoveries",
"default":false
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View File

@ -1216,6 +1216,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
getEngine().failEngine(reason, e);
}
/**
* Acquire the searcher without applying the additional reader wrapper.
*/
public Engine.Searcher acquireSearcherNoWrap(String source) {
readAllowed();
markSearcherAccessed();
return getEngine().acquireSearcher(source, Engine.SearcherScope.EXTERNAL);
}
public Engine.Searcher acquireSearcher(String source) {
return acquireSearcher(source, Engine.SearcherScope.EXTERNAL);
}

View File

@ -1012,10 +1012,16 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
*/
public boolean canMatch(ShardSearchRequest request) throws IOException {
assert request.searchType() == SearchType.QUERY_THEN_FETCH : "unexpected search type: " + request.searchType();
try (DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, false, "can_match")) {
SearchSourceBuilder source = context.request().source();
if (canRewriteToMatchNone(source)) {
QueryBuilder queryBuilder = source.query();
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().getId());
// we don't want to use the reader wrapper since it could run costly operations
// and we can afford false positives.
try (Engine.Searcher searcher = indexShard.acquireSearcherNoWrap("can_match")) {
QueryShardContext context = indexService.newQueryShardContext(request.shardId().id(), searcher,
request::nowInMillis, request.getClusterAlias());
Rewriteable.rewrite(request.getRewriteable(), context, false);
if (canRewriteToMatchNone(request.source())) {
QueryBuilder queryBuilder = request.source().query();
return queryBuilder instanceof MatchNoneQueryBuilder == false;
}
return true; // null query means match_all

View File

@ -19,6 +19,9 @@
package org.elasticsearch.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.ElasticsearchException;
@ -76,6 +79,7 @@ import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Before;
import java.io.IOException;
import java.util.Collection;
@ -88,6 +92,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import static java.util.Collections.singletonList;
@ -111,7 +116,42 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class, InternalOrPrivateSettingsPlugin.class);
return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class,
ReaderWrapperCountPlugin.class, InternalOrPrivateSettingsPlugin.class);
}
public static class ReaderWrapperCountPlugin extends Plugin {
@Override
public void onIndexModule(IndexModule indexModule) {
indexModule.setReaderWrapper(service -> SearchServiceTests::apply);
}
}
@Before
private void resetCount() {
numWrapInvocations = new AtomicInteger(0);
}
private static AtomicInteger numWrapInvocations = new AtomicInteger(0);
private static DirectoryReader apply(DirectoryReader directoryReader) throws IOException {
numWrapInvocations.incrementAndGet();
return new FilterDirectoryReader(directoryReader,
new FilterDirectoryReader.SubReaderWrapper() {
@Override
public LeafReader wrap(LeafReader reader) {
return reader;
}
}) {
@Override
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
return in;
}
@Override
public CacheHelper getReaderCacheHelper() {
return directoryReader.getReaderCacheHelper();
}
};
}
public static class CustomScriptPlugin extends MockScriptPlugin {
@ -559,6 +599,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
final IndexShard indexShard = indexService.getShard(0);
SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
int numWrapReader = numWrapInvocations.get();
assertTrue(service.canMatch(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1,
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null)));
@ -582,6 +623,13 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
searchRequest.source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder()));
assertFalse(service.canMatch(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1,
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null)));
assertEquals(numWrapReader, numWrapInvocations.get());
// make sure that the wrapper is called when the context is actually created
service.createContext(new ShardSearchRequest(OriginalIndices.NONE, searchRequest,
indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY),
1f, -1, null, null)).close();
assertEquals(numWrapReader+1, numWrapInvocations.get());
}
public void testCanRewriteToMatchNone() {

View File

@ -24,8 +24,12 @@ Creates and updates role mappings.
==== {api-description-title}
Role mappings define which roles are assigned to each user. Each mapping has
_rules_ that identify users and a list of _roles_ that are
granted to those users.
_rules_ that identify users and a list of _roles_ that are granted to those users.
The role mapping APIs are generally the preferred way to manage role mappings
rather than using {stack-ov}/mapping-roles.html#mapping-roles-file[role mapping files].
The create or update role mappings API cannot update role mappings that are defined
in role mapping files.
NOTE: This API does not create roles. Rather, it maps users to existing roles.
Roles can be created by using <<security-api-roles, Role Management APIs>> or

View File

@ -21,7 +21,12 @@ Removes role mappings.
==== {api-description-title}
Role mappings define which roles are assigned to each user. For more information,
see <<mapping-roles>>.
see <<mapping-roles>>.
The role mapping APIs are generally the preferred way to manage role mappings
rather than using <<mapping-roles-file,role mapping files>>.
The delete role mappings API cannot remove role mappings that are defined
in role mapping files.
[[security-api-delete-role-mapping-path-params]]
==== {api-path-parms-title}

View File

@ -23,7 +23,12 @@ Retrieves role mappings.
==== {api-description-title}
Role mappings define which roles are assigned to each user. For more information,
see <<mapping-roles>>.
see <<mapping-roles>>.
The role mapping APIs are generally the preferred way to manage role mappings
rather than using <<mapping-roles-file,role mapping files>>.
The get role mappings API cannot retrieve role mappings that are defined
in role mapping files.
[[security-api-get-role-mapping-path-params]]
==== {api-path-parms-title}

View File

@ -66,6 +66,24 @@ You can change this default behavior by changing the
this is a common setting in Elasticsearch, changing its value might affect other
schedules in the system.
While the _role mapping APIs_ are the preferred way to manage role mappings, using
the `role_mappings.yml` file becomes useful in a couple of use cases:
. If you want to define fixed role mappings that no one (besides an administrator
with physical access to the {es} nodes) would be able to change.
. If cluster administration depends on users from external realms and these users
need to have their roles mapped to them even when the cluster is RED. For instance,
an administrator who authenticates via LDAP or PKI and is assigned an
administrator role so that they can perform corrective actions.
Note, however, that the `role_mappings.yml` file is provided
as a minimal administrative function and is not intended to cover all use
cases for defining roles.
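For reference, an entry in this file maps a role name to a list of user or
group identifiers; a minimal, hypothetical example for an LDAP group:

[source,yaml]
--------------------------------------------------
# Hypothetical example: members of this LDAP group are granted
# the "superuser" role, even when the cluster is RED.
superuser:
  - "cn=admins,dc=example,dc=com"
--------------------------------------------------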
IMPORTANT: You cannot view, edit, or remove any roles that are defined in the role
mapping files by using the role mapping APIs.
==== Realm specific details
[float]
[[ldap-role-mapping]]

View File

@ -91,6 +91,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config");
public static final ParseField HEADERS = new ParseField("headers");
public static final ParseField DELAYED_DATA_CHECK_CONFIG = new ParseField("delayed_data_check_config");
public static final ParseField MAX_EMPTY_SEARCHES = new ParseField("max_empty_searches");
// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ObjectParser<Builder, Void> LENIENT_PARSER = createParser(true);
@ -152,6 +153,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
parser.declareObject(Builder::setDelayedDataCheckConfig,
ignoreUnknownFields ? DelayedDataCheckConfig.LENIENT_PARSER : DelayedDataCheckConfig.STRICT_PARSER,
DELAYED_DATA_CHECK_CONFIG);
parser.declareInt(Builder::setMaxEmptySearches, MAX_EMPTY_SEARCHES);
return parser;
}
@ -176,11 +178,12 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
private final ChunkingConfig chunkingConfig;
private final Map<String, String> headers;
private final DelayedDataCheckConfig delayedDataCheckConfig;
private final Integer maxEmptySearches;
private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices,
QueryProvider queryProvider, AggProvider aggProvider, List<SearchSourceBuilder.ScriptField> scriptFields,
Integer scrollSize, ChunkingConfig chunkingConfig, Map<String, String> headers,
DelayedDataCheckConfig delayedDataCheckConfig) {
DelayedDataCheckConfig delayedDataCheckConfig, Integer maxEmptySearches) {
this.id = id;
this.jobId = jobId;
this.queryDelay = queryDelay;
@ -193,6 +196,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
this.chunkingConfig = chunkingConfig;
this.headers = Collections.unmodifiableMap(headers);
this.delayedDataCheckConfig = delayedDataCheckConfig;
this.maxEmptySearches = maxEmptySearches;
}
public DatafeedConfig(StreamInput in) throws IOException {
@ -233,6 +237,11 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
} else {
delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig();
}
if (in.getVersion().onOrAfter(Version.V_7_5_0)) {
maxEmptySearches = in.readOptionalVInt();
} else {
maxEmptySearches = null;
}
}
/**
@ -401,6 +410,10 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
return delayedDataCheckConfig;
}
public Integer getMaxEmptySearches() {
return maxEmptySearches;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
@ -439,6 +452,9 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
out.writeOptionalWriteable(delayedDataCheckConfig);
}
if (out.getVersion().onOrAfter(Version.V_7_5_0)) {
out.writeOptionalVInt(maxEmptySearches);
}
}
@Override
@ -475,6 +491,9 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
if (delayedDataCheckConfig != null) {
builder.field(DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig);
}
if (maxEmptySearches != null) {
builder.field(MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches);
}
builder.endObject();
return builder;
}
@ -507,13 +526,14 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig)
&& Objects.equals(this.headers, that.headers)
&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig);
&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig)
&& Objects.equals(this.maxEmptySearches, that.maxEmptySearches);
}
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, queryProvider, scrollSize, aggProvider, scriptFields, chunkingConfig,
headers, delayedDataCheckConfig);
headers, delayedDataCheckConfig, maxEmptySearches);
}
@Override
@ -586,6 +606,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
private ChunkingConfig chunkingConfig;
private Map<String, String> headers = Collections.emptyMap();
private DelayedDataCheckConfig delayedDataCheckConfig = DelayedDataCheckConfig.defaultDelayedDataCheckConfig();
private Integer maxEmptySearches;
public Builder() { }
@ -608,6 +629,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
this.chunkingConfig = config.chunkingConfig;
this.headers = new HashMap<>(config.headers);
this.delayedDataCheckConfig = config.getDelayedDataCheckConfig();
this.maxEmptySearches = config.getMaxEmptySearches();
}
public void setId(String datafeedId) {
@ -701,6 +723,18 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
this.delayedDataCheckConfig = delayedDataCheckConfig;
}
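// A value of -1 clears the setting (stored as null); zero and any
// other non-positive value is rejected as invalid.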
public void setMaxEmptySearches(int maxEmptySearches) {
if (maxEmptySearches == -1) {
this.maxEmptySearches = null;
} else if (maxEmptySearches <= 0) {
String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE,
DatafeedConfig.MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches);
throw ExceptionsHelper.badRequestException(msg);
} else {
this.maxEmptySearches = maxEmptySearches;
}
}
public DatafeedConfig build() {
ExceptionsHelper.requireNonNull(id, ID.getPreferredName());
ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
@ -716,7 +750,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
setDefaultQueryDelay();
return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, queryProvider, aggProvider, scriptFields, scrollSize,
chunkingConfig, headers, delayedDataCheckConfig);
chunkingConfig, headers, delayedDataCheckConfig, maxEmptySearches);
}
void validateScriptFields() {

View File

@ -81,6 +81,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
PARSER.declareObject(Builder::setDelayedDataCheckConfig,
DelayedDataCheckConfig.STRICT_PARSER,
DatafeedConfig.DELAYED_DATA_CHECK_CONFIG);
PARSER.declareInt(Builder::setMaxEmptySearches, DatafeedConfig.MAX_EMPTY_SEARCHES);
}
private final String id;
@ -94,11 +95,13 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private final DelayedDataCheckConfig delayedDataCheckConfig;
private final Integer maxEmptySearches;
private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices,
QueryProvider queryProvider, AggProvider aggProvider,
List<SearchSourceBuilder.ScriptField> scriptFields,
Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) {
Integer scrollSize, ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig,
Integer maxEmptySearches) {
this.id = id;
this.jobId = jobId;
this.queryDelay = queryDelay;
@ -110,6 +113,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
this.scrollSize = scrollSize;
this.chunkingConfig = chunkingConfig;
this.delayedDataCheckConfig = delayedDataCheckConfig;
this.maxEmptySearches = maxEmptySearches;
}
public DatafeedUpdate(StreamInput in) throws IOException {
@ -147,6 +151,11 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
} else {
delayedDataCheckConfig = null;
}
if (in.getVersion().onOrAfter(Version.V_7_5_0)) {
maxEmptySearches = in.readOptionalInt();
} else {
maxEmptySearches = null;
}
}
/**
@ -192,6 +201,9 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
out.writeOptionalWriteable(delayedDataCheckConfig);
}
if (out.getVersion().onOrAfter(Version.V_7_5_0)) {
out.writeOptionalInt(maxEmptySearches);
}
}
@Override
@ -222,6 +234,8 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize);
addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig);
addOptionalField(builder, DatafeedConfig.DELAYED_DATA_CHECK_CONFIG, delayedDataCheckConfig);
addOptionalField(builder, DatafeedConfig.MAX_EMPTY_SEARCHES, maxEmptySearches);
builder.endObject();
return builder;
}
@ -290,6 +304,10 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
return delayedDataCheckConfig;
}
public Integer getMaxEmptySearches() {
return maxEmptySearches;
}
/**
* Applies the update to the given {@link DatafeedConfig}
* @return a new {@link DatafeedConfig} that contains the update
@ -334,6 +352,9 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
if (delayedDataCheckConfig != null) {
builder.setDelayedDataCheckConfig(delayedDataCheckConfig);
}
if (maxEmptySearches != null) {
builder.setMaxEmptySearches(maxEmptySearches);
}
if (headers.isEmpty() == false) {
// Adjust the request, adding security headers from the current thread context
@ -373,13 +394,14 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
&& Objects.equals(this.aggProvider, that.aggProvider)
&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig)
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig);
&& Objects.equals(this.chunkingConfig, that.chunkingConfig)
&& Objects.equals(this.maxEmptySearches, that.maxEmptySearches);
}
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, queryProvider, scrollSize, aggProvider, scriptFields, chunkingConfig,
delayedDataCheckConfig);
delayedDataCheckConfig, maxEmptySearches);
}
@Override
@ -396,7 +418,8 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
&& (aggProvider == null || Objects.equals(aggProvider.getAggs(), datafeed.getAggregations()))
&& (scriptFields == null || Objects.equals(scriptFields, datafeed.getScriptFields()))
&& (delayedDataCheckConfig == null || Objects.equals(delayedDataCheckConfig, datafeed.getDelayedDataCheckConfig()))
&& (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig()));
&& (chunkingConfig == null || Objects.equals(chunkingConfig, datafeed.getChunkingConfig()))
&& (maxEmptySearches == null || Objects.equals(maxEmptySearches, datafeed.getMaxEmptySearches()));
}
public static class Builder {
@ -412,6 +435,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
private Integer scrollSize;
private ChunkingConfig chunkingConfig;
private DelayedDataCheckConfig delayedDataCheckConfig;
private Integer maxEmptySearches;
public Builder() {
}
@ -432,6 +456,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
this.scrollSize = config.scrollSize;
this.chunkingConfig = config.chunkingConfig;
this.delayedDataCheckConfig = config.delayedDataCheckConfig;
this.maxEmptySearches = config.maxEmptySearches;
}
public Builder setId(String datafeedId) {
@ -499,9 +524,19 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
return this;
}
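// Unlike DatafeedConfig.Builder, -1 is accepted and preserved here so
// that an update can signal "unset the existing max_empty_searches".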
public Builder setMaxEmptySearches(int maxEmptySearches) {
if (maxEmptySearches < -1 || maxEmptySearches == 0) {
String msg = Messages.getMessage(Messages.DATAFEED_CONFIG_INVALID_OPTION_VALUE,
DatafeedConfig.MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches);
throw ExceptionsHelper.badRequestException(msg);
}
this.maxEmptySearches = maxEmptySearches;
return this;
}
public DatafeedUpdate build() {
return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, queryProvider, aggProvider, scriptFields, scrollSize,
chunkingConfig, delayedDataCheckConfig);
chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
}
}

View File

@ -27,6 +27,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
private static final String AUDIT_XFIELD = "audit";
private static final String IP_FILTER_XFIELD = "ipfilter";
private static final String ANONYMOUS_XFIELD = "anonymous";
private static final String FIPS_140_XFIELD = "fips_140";
private Map<String, Object> realmsUsage;
private Map<String, Object> rolesStoreUsage;
@ -37,6 +38,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
private Map<String, Object> ipFilterUsage;
private Map<String, Object> anonymousUsage;
private Map<String, Object> roleMappingStoreUsage;
private Map<String, Object> fips140Usage;
public SecurityFeatureSetUsage(StreamInput in) throws IOException {
super(in);
@ -55,13 +57,17 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
}
anonymousUsage = in.readMap();
roleMappingStoreUsage = in.readMap();
if (in.getVersion().onOrAfter(Version.V_7_5_0)) {
fips140Usage = in.readMap();
}
}
public SecurityFeatureSetUsage(boolean available, boolean enabled, Map<String, Object> realmsUsage,
Map<String, Object> rolesStoreUsage, Map<String, Object> roleMappingStoreUsage,
Map<String, Object> sslUsage, Map<String, Object> auditUsage,
Map<String, Object> ipFilterUsage, Map<String, Object> anonymousUsage,
Map<String, Object> tokenServiceUsage, Map<String, Object> apiKeyServiceUsage) {
Map<String, Object> tokenServiceUsage, Map<String, Object> apiKeyServiceUsage,
Map<String, Object> fips140Usage) {
super(XPackField.SECURITY, available, enabled);
this.realmsUsage = realmsUsage;
this.rolesStoreUsage = rolesStoreUsage;
@ -72,6 +78,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
this.auditUsage = auditUsage;
this.ipFilterUsage = ipFilterUsage;
this.anonymousUsage = anonymousUsage;
this.fips140Usage = fips140Usage;
}
@Override
@ -92,6 +99,9 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
}
out.writeMap(anonymousUsage);
out.writeMap(roleMappingStoreUsage);
if (out.getVersion().onOrAfter(Version.V_7_5_0)) {
out.writeMap(fips140Usage);
}
}
@Override
@ -107,6 +117,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
builder.field(AUDIT_XFIELD, auditUsage);
builder.field(IP_FILTER_XFIELD, ipFilterUsage);
builder.field(ANONYMOUS_XFIELD, anonymousUsage);
builder.field(FIPS_140_XFIELD, fips140Usage);
} else if (sslUsage.isEmpty() == false) {
// A trial (or basic) license can have SSL without security.
// This is because security defaults to disabled on that license, but that dynamic-default does not disable SSL.

View File

@ -23,7 +23,9 @@ import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotState;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;
@ -127,14 +129,15 @@ public class SnapshotRetentionConfiguration implements ToXContentObject, Writeab
.mapToLong(SnapshotInfo::startTime)
.max()
.orElse(Long.MIN_VALUE);
final Set<SnapshotState> unsuccessfulStates = new HashSet<>(Arrays.asList(SnapshotState.FAILED, SnapshotState.PARTIAL));
return si -> {
final String snapName = si.snapshotId().getName();
// First, if there's no expire_after and a more recent successful snapshot, we can delete all the failed ones
if (this.expireAfter == null && SnapshotState.FAILED.equals(si.state()) && newestSuccessfulTimestamp > si.startTime()) {
if (this.expireAfter == null && unsuccessfulStates.contains(si.state()) && newestSuccessfulTimestamp > si.startTime()) {
// There's no expire_after and there's a more recent successful snapshot, delete this failed one
logger.trace("[{}]: ELIGIBLE as it is FAILED and there is a more recent successful snapshot", snapName);
logger.trace("[{}]: ELIGIBLE as it is {} and there is a more recent successful snapshot", snapName, si.state());
return true;
}
@ -167,13 +170,13 @@ public class SnapshotRetentionConfiguration implements ToXContentObject, Writeab
// expiration time
if (this.minimumSnapshotCount != null) {
if (successfulSnapshotCount <= this.minimumSnapshotCount)
if (SnapshotState.FAILED.equals(si.state()) == false) {
if (unsuccessfulStates.contains(si.state()) == false) {
logger.trace("[{}]: INELIGIBLE as there are {} non-failed snapshots ({} total) and {} minimum snapshots needed",
snapName, successfulSnapshotCount, totalSnapshotCount, this.minimumSnapshotCount);
return false;
} else {
logger.trace("[{}]: SKIPPING minimum snapshot count check as this snapshot is {} and not counted " +
"towards the minimum snapshot count.", snapName, SnapshotState.FAILED);
"towards the minimum snapshot count.", snapName, si.state());
}
}
@ -190,10 +193,11 @@ public class SnapshotRetentionConfiguration implements ToXContentObject, Writeab
final Stream<SnapshotInfo> successfulSnapsEligibleForExpiration = sortedSnapshots.stream()
.filter(snap -> SnapshotState.SUCCESS.equals(snap.state()))
.limit(eligibleForExpiration);
final Stream<SnapshotInfo> failedSnaps = sortedSnapshots.stream()
.filter(snap -> SnapshotState.FAILED.equals(snap.state()));
final Stream<SnapshotInfo> unsuccessfulSnaps = sortedSnapshots.stream()
.filter(snap -> unsuccessfulStates.contains(snap.state()));
final Set<SnapshotInfo> snapsEligibleForExpiration = Stream.concat(successfulSnapsEligibleForExpiration, failedSnaps)
final Set<SnapshotInfo> snapsEligibleForExpiration = Stream
.concat(successfulSnapsEligibleForExpiration, unsuccessfulSnaps)
.collect(Collectors.toSet());
if (snapsEligibleForExpiration.contains(si) == false) {

View File

@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.datafeed;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
@ -68,6 +69,7 @@ import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedConfig> {
@ -149,6 +151,9 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
if (randomBoolean()) {
builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig(bucketSpanMillis));
}
if (randomBoolean()) {
builder.setMaxEmptySearches(randomIntBetween(10, 100));
}
return builder;
}
@ -378,10 +383,10 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
defaultFeedBuilder.setIndices(Collections.singletonList("index"));
DatafeedConfig defaultFeed = defaultFeedBuilder.build();
assertThat(defaultFeed.getScrollSize(), equalTo(1000));
assertThat(defaultFeed.getQueryDelay().seconds(), greaterThanOrEqualTo(60L));
assertThat(defaultFeed.getQueryDelay().seconds(), lessThan(120L));
assertThat(defaultFeed.getMaxEmptySearches(), is(nullValue()));
}
public void testDefaultQueryDelay() {
@ -406,6 +411,20 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
expectThrows(IllegalArgumentException.class, () -> conf.setIndices(null));
}
public void testCheckValid_GivenInvalidMaxEmptySearches() {
DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1");
ElasticsearchStatusException e =
expectThrows(ElasticsearchStatusException.class, () -> conf.setMaxEmptySearches(randomFrom(-2, 0)));
assertThat(e.getMessage(), containsString("Invalid max_empty_searches value"));
}
public void testCheckValid_GivenMaxEmptySearchesMinusOne() {
DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1");
conf.setIndices(Collections.singletonList("whatever"));
conf.setMaxEmptySearches(-1);
assertThat(conf.build().getMaxEmptySearches(), is(nullValue()));
}
public void testCheckValid_GivenEmptyIndices() {
DatafeedConfig.Builder conf = new DatafeedConfig.Builder("datafeed1", "job1");
conf.setIndices(Collections.emptyList());
@ -824,7 +843,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
@Override
protected DatafeedConfig mutateInstance(DatafeedConfig instance) throws IOException {
DatafeedConfig.Builder builder = new DatafeedConfig.Builder(instance);
switch (between(0, 9)) {
switch (between(0, 10)) {
case 0:
builder.setId(instance.getId() + randomValidDatafeedId());
break;
@ -886,6 +905,13 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase<DatafeedCon
builder.setChunkingConfig(ChunkingConfig.newAuto());
}
break;
case 10:
if (instance.getMaxEmptySearches() == null) {
builder.setMaxEmptySearches(randomIntBetween(10, 100));
} else {
builder.setMaxEmptySearches(instance.getMaxEmptySearches() + 1);
}
break;
default:
throw new AssertionError("Illegal randomisation branch");
}

View File

@ -121,6 +121,9 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
if (randomBoolean()) {
builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig(randomLongBetween(300_001, 400_000)));
}
if (randomBoolean()) {
builder.setMaxEmptySearches(randomBoolean() ? -1 : randomIntBetween(10, 100));
}
return builder.build();
}
@ -339,7 +342,7 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
@Override
protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) throws IOException {
DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(instance);
switch (between(0, 9)) {
switch (between(0, 10)) {
case 0:
builder.setId(instance.getId() + DatafeedConfigTests.randomValidDatafeedId());
break;
@ -413,6 +416,13 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
builder.setChunkingConfig(null);
}
break;
case 10:
if (instance.getMaxEmptySearches() == null) {
builder.setMaxEmptySearches(randomFrom(-1, 10));
} else {
builder.setMaxEmptySearches(instance.getMaxEmptySearches() + 100);
}
break;
default:
throw new AssertionError("Illegal randomisation branch");
}

View File

@ -11,6 +11,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicy;
import org.elasticsearch.xpack.core.slm.SnapshotRetentionConfiguration;
@ -103,13 +104,21 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase {
}
public void testFailuresDeletedIfExpired() {
assertUnsuccessfulDeletedIfExpired(true);
}
public void testPartialsDeletedIfExpired() {
assertUnsuccessfulDeletedIfExpired(false);
}
private void assertUnsuccessfulDeletedIfExpired(boolean failure) {
SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(
() -> TimeValue.timeValueDays(1).millis() + 1,
TimeValue.timeValueDays(1), null, null);
SnapshotInfo oldInfo = makeFailureInfo(0);
SnapshotInfo oldInfo = makeFailureOrPartial(0, failure);
assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(oldInfo)).test(oldInfo), equalTo(true));
SnapshotInfo newInfo = makeFailureInfo(1);
SnapshotInfo newInfo = makeFailureOrPartial(1, failure);
assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(newInfo)).test(newInfo), equalTo(false));
List<SnapshotInfo> infos = new ArrayList<>();
@ -120,10 +129,18 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase {
}
public void testFailuresDeletedIfNoExpiryAndMoreRecentSuccessExists() {
assertUnsuccessfulDeletedIfNoExpiryAndMoreRecentSuccessExists(true);
}
public void testPartialsDeletedIfNoExpiryAndMoreRecentSuccessExists() {
assertUnsuccessfulDeletedIfNoExpiryAndMoreRecentSuccessExists(false);
}
private void assertUnsuccessfulDeletedIfNoExpiryAndMoreRecentSuccessExists(boolean failure) {
SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 5);
SnapshotInfo s1 = makeInfo(1);
SnapshotInfo s2 = makeInfo(2);
SnapshotInfo s3 = makeFailureInfo(3);
SnapshotInfo s3 = makeFailureOrPartial(3, failure);
SnapshotInfo s4 = makeInfo(4);
List<SnapshotInfo> infos = Arrays.asList(s1 , s2, s3, s4);
@ -134,12 +151,20 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase {
}
public void testFailuresKeptIfNoExpiryAndNoMoreRecentSuccess() {
assertUnsuccessfulKeptIfNoExpiryAndNoMoreRecentSuccess(true);
}
public void testPartialsKeptIfNoExpiryAndNoMoreRecentSuccess() {
assertUnsuccessfulKeptIfNoExpiryAndNoMoreRecentSuccess(false);
}
private void assertUnsuccessfulKeptIfNoExpiryAndNoMoreRecentSuccess(boolean failure) {
// Also tests that failures are not counted towards the maximum
SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 3);
SnapshotInfo s1 = makeInfo(1);
SnapshotInfo s2 = makeInfo(2);
SnapshotInfo s3 = makeInfo(3);
SnapshotInfo s4 = makeFailureInfo(4);
SnapshotInfo s4 = makeFailureOrPartial(4, failure);
List<SnapshotInfo> infos = Arrays.asList(s1 , s2, s3, s4);
assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(false));
@ -149,11 +174,19 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase {
}
public void testFailuresNotCountedTowardsMaximum() {
assertUnsuccessfulNotCountedTowardsMaximum(true);
}
public void testPartialsNotCountedTowardsMaximum() {
assertUnsuccessfulNotCountedTowardsMaximum(false);
}
private void assertUnsuccessfulNotCountedTowardsMaximum(boolean failure) {
SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, TimeValue.timeValueDays(1), 2, 2);
SnapshotInfo s1 = makeInfo(1);
SnapshotInfo s2 = makeFailureInfo(2);
SnapshotInfo s3 = makeFailureInfo(3);
SnapshotInfo s4 = makeFailureInfo(4);
SnapshotInfo s2 = makeFailureOrPartial(2, failure);
SnapshotInfo s3 = makeFailureOrPartial(3, failure);
SnapshotInfo s4 = makeFailureOrPartial(4, failure);
SnapshotInfo s5 = makeInfo(5);
List<SnapshotInfo> infos = Arrays.asList(s1 , s2, s3, s4, s5);
@ -165,10 +198,18 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase {
}
public void testFailuresNotCountedTowardsMinimum() {
assertUnsuccessfulNotCountedTowardsMinimum(true);
}
public void testPartialsNotCountedTowardsMinimum() {
assertUnsuccessfulNotCountedTowardsMinimum(false);
}
private void assertUnsuccessfulNotCountedTowardsMinimum(boolean failure) {
SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> TimeValue.timeValueDays(1).millis() + 1,
TimeValue.timeValueDays(1), 2, null);
SnapshotInfo oldInfo = makeInfo(0);
SnapshotInfo failureInfo = makeFailureInfo( 1);
SnapshotInfo failureInfo = makeFailureOrPartial(1, failure);
SnapshotInfo newInfo = makeInfo(2);
List<SnapshotInfo> infos = new ArrayList<>();
@ -186,12 +227,14 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase {
assertThat(conf.getSnapshotDeletionPredicate(infos).test(oldInfo), equalTo(true));
}
public void testMostRecentSuccessfulTimestampIsUsed() {
boolean failureBeforePartial = randomBoolean();
SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(() -> 1, null, 2, 2);
SnapshotInfo s1 = makeInfo(1);
SnapshotInfo s2 = makeInfo(2);
SnapshotInfo s3 = makeFailureInfo(3);
SnapshotInfo s4 = makeFailureInfo(4);
SnapshotInfo s3 = makeFailureOrPartial(3, failureBeforePartial);
SnapshotInfo s4 = makeFailureOrPartial(4, failureBeforePartial == false);
List<SnapshotInfo> infos = Arrays.asList(s1 , s2, s3, s4);
assertThat(conf.getSnapshotDeletionPredicate(infos).test(s1), equalTo(false));
@ -204,15 +247,25 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase {
final Map<String, Object> meta = new HashMap<>();
meta.put(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD, REPO);
final int totalShards = between(1,20);
return new SnapshotInfo(new SnapshotId("snap-" + randomAlphaOfLength(3), "uuid"),
SnapshotInfo snapInfo = new SnapshotInfo(new SnapshotId("snap-" + randomAlphaOfLength(3), "uuid"),
Collections.singletonList("foo"),
startTime,
null,
startTime + between(1,10000),
startTime + between(1, 10000),
totalShards,
new ArrayList<>(),
false,
meta);
assertThat(snapInfo.state(), equalTo(SnapshotState.SUCCESS));
return snapInfo;
}
private SnapshotInfo makeFailureOrPartial(long startTime, boolean failure) {
if (failure) {
return makeFailureInfo(startTime);
} else {
return makePartialInfo(startTime);
}
}
private SnapshotInfo makeFailureInfo(long startTime) {
@ -225,14 +278,39 @@ public class SnapshotRetentionConfigurationTests extends ESTestCase {
failures.add(new SnapshotShardFailure("nodeId", new ShardId("index-name", "index-uuid", i), "failed"));
}
assert failureCount == failures.size();
return new SnapshotInfo(new SnapshotId("snap-fail-" + randomAlphaOfLength(3), "uuid-fail"),
SnapshotInfo snapInfo = new SnapshotInfo(new SnapshotId("snap-fail-" + randomAlphaOfLength(3), "uuid-fail"),
Collections.singletonList("foo-fail"),
startTime,
"forced-failure",
startTime + between(1,10000),
startTime + between(1, 10000),
totalShards,
failures,
randomBoolean(),
meta);
assertThat(snapInfo.state(), equalTo(SnapshotState.FAILED));
return snapInfo;
}
private SnapshotInfo makePartialInfo(long startTime) {
final Map<String, Object> meta = new HashMap<>();
meta.put(SnapshotLifecyclePolicy.POLICY_ID_METADATA_FIELD, REPO);
final int totalShards = between(2,20);
final List<SnapshotShardFailure> failures = new ArrayList<>();
final int failureCount = between(1,totalShards - 1);
for (int i = 0; i < failureCount; i++) {
failures.add(new SnapshotShardFailure("nodeId", new ShardId("index-name", "index-uuid", i), "failed"));
}
assert failureCount == failures.size();
SnapshotInfo snapInfo = new SnapshotInfo(new SnapshotId("snap-fail-" + randomAlphaOfLength(3), "uuid-fail"),
Collections.singletonList("foo-fail"),
startTime,
null,
startTime + between(1, 10000),
totalShards,
failures,
randomBoolean(),
meta);
assertThat(snapInfo.state(), equalTo(SnapshotState.PARTIAL));
return snapInfo;
}
}
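Taken together, the changes above run every retention scenario twice: once with FAILED snapshots and once with PARTIAL ones. A minimal sketch of the pattern these tests exercise, built only from the constructor and helpers shown in this file (illustrative, not an additional test in the commit):

    SnapshotRetentionConfiguration conf = new SnapshotRetentionConfiguration(
        () -> TimeValue.timeValueDays(1).millis() + 1,   // "now" supplier
        TimeValue.timeValueDays(1),                      // expire_after
        null, null);                                     // minimum / maximum snapshot count
    // An expired unsuccessful snapshot is deletable; a recent one is kept,
    // whether it FAILED or was merely PARTIAL.
    SnapshotInfo expired = makeFailureOrPartial(0, randomBoolean());
    SnapshotInfo recent = makeFailureOrPartial(1, randomBoolean());
    assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(expired)).test(expired), equalTo(true));
    assertThat(conf.getSnapshotDeletionPredicate(Collections.singletonList(recent)).test(recent), equalTo(false));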
@ -10,7 +10,6 @@ import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
@ -50,20 +49,17 @@ public final class TransportFreezeIndexAction extends
private final DestructiveOperations destructiveOperations;
private final MetaDataIndexStateService indexStateService;
private final TransportCloseIndexAction transportCloseIndexAction;
@Inject
public TransportFreezeIndexAction(MetaDataIndexStateService indexStateService, TransportService transportService,
ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
DestructiveOperations destructiveOperations,
TransportCloseIndexAction transportCloseIndexAction) {
DestructiveOperations destructiveOperations) {
super(FreezeIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, FreezeRequest::new,
indexNameExpressionResolver);
this.destructiveOperations = destructiveOperations;
this.indexStateService = indexStateService;
this.transportCloseIndexAction = transportCloseIndexAction;
}
@Override
protected String executor() {
@ -250,7 +250,7 @@ public class SnapshotRetentionTask implements SchedulerEngine.Listener {
@Override
public void onResponse(GetSnapshotsResponse resp) {
final Set<SnapshotState> retainableStates =
new HashSet<>(Arrays.asList(SnapshotState.SUCCESS, SnapshotState.FAILED));
new HashSet<>(Arrays.asList(SnapshotState.SUCCESS, SnapshotState.FAILED, SnapshotState.PARTIAL));
try {
snapshots.compute(repository, (k, previousSnaps) -> {
if (previousSnaps != null) {
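With SnapshotState.PARTIAL added to the retainable set, retention now reasons about partial snapshots exactly as it has done about failed ones. A hedged sketch of the filtering this set drives (the stream pipeline is illustrative; the production code applies the set inside the compute callback above):

    final Set<SnapshotState> retainableStates =
        new HashSet<>(Arrays.asList(SnapshotState.SUCCESS, SnapshotState.FAILED, SnapshotState.PARTIAL));
    // Keep only snapshots in a state that retention is allowed to act on.
    List<SnapshotInfo> eligible = resp.getSnapshots().stream()
        .filter(info -> retainableStates.contains(info.state()))
        .collect(Collectors.toList());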
@ -39,6 +39,7 @@ import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleAction;
import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction;
import org.elasticsearch.xpack.ilm.IndexLifecycle;
import org.junit.After;
import org.junit.Before;
import java.util.Arrays;
import java.util.Collection;
@ -60,11 +61,19 @@ import static org.hamcrest.Matchers.greaterThan;
*/
@TestLogging(value = "org.elasticsearch.snapshots.mockstore:DEBUG",
reason = "https://github.com/elastic/elasticsearch/issues/46508")
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
private static final String REPO = "repo-id";
@Before
public void ensureClusterNodes() {
logger.info("--> starting enough nodes to ensure we have enough to safely stop for tests");
internalCluster().startMasterOnlyNodes(2);
internalCluster().startDataOnlyNodes(2);
ensureGreen();
}
@After
public void resetSLMSettings() throws Exception {
// Cancel/delete all snapshots
@ -180,7 +189,7 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
logger.info("--> creating policy {}", policyId);
createSnapshotPolicy(policyId, "snap", "1 2 3 4 5 ?", REPO, indexName, true,
new SnapshotRetentionConfiguration(TimeValue.timeValueSeconds(0), null, null));
false, new SnapshotRetentionConfiguration(TimeValue.timeValueSeconds(0), null, null));
// Create a snapshot and wait for it to be complete (need something that can be deleted)
final String completedSnapshotName = executePolicy(policyId);
@ -281,20 +290,26 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/47937")
public void testBasicFailureRetention() throws Exception {
testUnsuccessfulSnapshotRetention(false);
}
public void testBasicPartialRetention() throws Exception {
testUnsuccessfulSnapshotRetention(true);
}
private void testUnsuccessfulSnapshotRetention(boolean partialSuccess) throws Exception {
final String indexName = "test-idx";
final String policyId = "test-policy";
final SnapshotState expectedUnsuccessfulState = partialSuccess ? SnapshotState.PARTIAL : SnapshotState.FAILED;
// Setup
logger.info("--> starting two master nodes and two data nodes");
internalCluster().startMasterOnlyNodes(2);
internalCluster().startDataOnlyNodes(2);
createAndPopulateIndex(indexName);
// Create a snapshot repo
initializeRepo(REPO);
createSnapshotPolicy(policyId, "snap", "1 2 3 4 5 ?", REPO, indexName, true,
new SnapshotRetentionConfiguration(null, 1, 2));
partialSuccess, new SnapshotRetentionConfiguration(null, 1, 2));
// Create a failed or partial snapshot
AtomicReference<String> failedSnapshotName = new AtomicReference<>();
@ -321,12 +336,17 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
failedSnapshotName.set(snapshotFuture.get().getSnapshotName());
assertNotNull(failedSnapshotName.get());
logger.info("--> verify that snapshot [{}] failed", failedSnapshotName.get());
logger.info("--> verify that snapshot [{}] is {}", failedSnapshotName.get(), expectedUnsuccessfulState);
assertBusy(() -> {
GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster()
.prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get();
SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
assertEquals(SnapshotState.FAILED, snapshotInfo.state());
try {
GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster()
.prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get();
SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
assertEquals(expectedUnsuccessfulState, snapshotInfo.state());
} catch (SnapshotMissingException ex) {
logger.info("failed to find snapshot {}, retrying", failedSnapshotName);
throw new AssertionError(ex);
}
});
}
@ -370,14 +390,14 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster()
.prepareGetSnapshots(REPO).setSnapshots(failedSnapshotName.get()).get();
SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
assertEquals(SnapshotState.FAILED, snapshotInfo.state());
assertEquals(expectedUnsuccessfulState, snapshotInfo.state());
}
// Run retention again and make sure the unsuccessful snapshot was deleted
{
logger.info("--> executing SLM retention");
assertAcked(client().execute(ExecuteSnapshotRetentionAction.INSTANCE, new ExecuteSnapshotRetentionAction.Request()).get());
logger.info("--> waiting for failed snapshot [{}] to be deleted", failedSnapshotName.get());
logger.info("--> waiting for {} snapshot [{}] to be deleted", expectedUnsuccessfulState, failedSnapshotName.get());
assertBusy(() -> {
try {
GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster()
@ -386,8 +406,8 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
} catch (SnapshotMissingException e) {
// This is what we want to happen
}
logger.info("--> failed snapshot [{}] has been deleted, checking successful snapshot [{}] still exists",
failedSnapshotName.get(), successfulSnapshotName.get());
logger.info("--> {} snapshot [{}] has been deleted, checking successful snapshot [{}] still exists",
expectedUnsuccessfulState, failedSnapshotName.get(), successfulSnapshotName.get());
GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster()
.prepareGetSnapshots(REPO).setSnapshots(successfulSnapshotName.get()).get();
SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
@ -424,15 +444,16 @@ public class SLMSnapshotBlockingIntegTests extends ESIntegTestCase {
private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String REPO,
String indexPattern, boolean ignoreUnavailable) {
createSnapshotPolicy(policyName, snapshotNamePattern, schedule, REPO, indexPattern,
ignoreUnavailable, SnapshotRetentionConfiguration.EMPTY);
ignoreUnavailable, false, SnapshotRetentionConfiguration.EMPTY);
}
private void createSnapshotPolicy(String policyName, String snapshotNamePattern, String schedule, String REPO,
String indexPattern, boolean ignoreUnavailable,
SnapshotRetentionConfiguration retention) {
boolean partialSnapsAllowed, SnapshotRetentionConfiguration retention) {
Map<String, Object> snapConfig = new HashMap<>();
snapConfig.put("indices", Collections.singletonList(indexPattern));
snapConfig.put("ignore_unavailable", ignoreUnavailable);
snapConfig.put("partial", partialSnapsAllowed);
if (randomBoolean()) {
Map<String, Object> metadata = new HashMap<>();
int fieldCount = randomIntBetween(2,5);
@ -17,6 +17,7 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction;
import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction;
import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction;
import org.elasticsearch.xpack.core.ml.action.KillProcessAction;
import org.elasticsearch.xpack.core.ml.action.PutJobAction;
import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction;
@ -51,7 +52,7 @@ import static org.hamcrest.Matchers.hasSize;
public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
@After
public void cleanup() throws Exception {
public void cleanup() {
cleanUp();
}
@ -111,7 +112,7 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
Job.Builder job = createScheduledJob("lookback-job-datafeed-recreated");
String datafeedId = "lookback-datafeed-datafeed-recreated";
DatafeedConfig datafeedConfig = createDatafeed(datafeedId, job.getId(), Arrays.asList("data"));
DatafeedConfig datafeedConfig = createDatafeed(datafeedId, job.getId(), Collections.singletonList("data"));
registerJob(job);
putJob(job);
@ -192,7 +193,7 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
putJob(job);
String datafeedId = "lookback-datafeed-query-delay-updated";
DatafeedConfig datafeedConfig = createDatafeed(datafeedId, job.getId(), Arrays.asList("data"));
DatafeedConfig datafeedConfig = createDatafeed(datafeedId, job.getId(), Collections.singletonList("data"));
registerDatafeed(datafeedConfig);
putDatafeed(datafeedConfig);
@ -247,6 +248,26 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
});
}
public void testRealtime_noDataAndAutoStop() throws Exception {
String jobId = "realtime-job-auto-stop";
String datafeedId = jobId + "-datafeed";
startRealtime(jobId, randomIntBetween(1, 3));
// Datafeed should auto-stop...
assertBusy(() -> {
GetDatafeedsStatsAction.Request request = new GetDatafeedsStatsAction.Request(datafeedId);
GetDatafeedsStatsAction.Response response = client().execute(GetDatafeedsStatsAction.INSTANCE, request).actionGet();
assertThat(response.getResponse().results().get(0).getDatafeedState(), equalTo(DatafeedState.STOPPED));
});
// ...and should have auto-closed the job too
assertBusy(() -> {
GetJobsStatsAction.Request request = new GetJobsStatsAction.Request(jobId);
GetJobsStatsAction.Response response = client().execute(GetJobsStatsAction.INSTANCE, request).actionGet();
assertThat(response.getResponse().results().get(0).getState(), equalTo(JobState.CLOSED));
});
}
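The auto-stop test above depends on a datafeed configured with max_empty_searches; a minimal sketch of that configuration using the createDatafeedBuilder helper shown further down in this file (the value 3 is illustrative):

    DatafeedConfig.Builder builder =
        createDatafeedBuilder(jobId + "-datafeed", jobId, Collections.singletonList("data"));
    // Stop the datafeed (and auto-close the job) after three consecutive empty
    // searches, provided it has never seen any data.
    builder.setMaxEmptySearches(3);
    DatafeedConfig datafeedConfig = builder.build();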
public void testRealtime_multipleStopCalls() throws Exception {
String jobId = "realtime-job-multiple-stop";
final String datafeedId = jobId + "-datafeed";
@ -402,13 +423,22 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
}
private void startRealtime(String jobId) throws Exception {
startRealtime(jobId, null);
}
private void startRealtime(String jobId, Integer maxEmptySearches) throws Exception {
client().admin().indices().prepareCreate("data")
.addMapping("type", "time", "type=date")
.get();
long numDocs1 = randomIntBetween(32, 2048);
long now = System.currentTimeMillis();
long lastWeek = now - 604800000;
indexDocs(logger, "data", numDocs1, lastWeek, now);
long numDocs1;
if (maxEmptySearches == null) {
numDocs1 = randomIntBetween(32, 2048);
long lastWeek = now - 604800000;
indexDocs(logger, "data", numDocs1, lastWeek, now);
} else {
numDocs1 = 0;
}
Job.Builder job = createScheduledJob(jobId);
registerJob(job);
@ -416,7 +446,12 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
openJob(job.getId());
assertBusy(() -> assertEquals(getJobStats(job.getId()).get(0).getState(), JobState.OPENED));
DatafeedConfig datafeedConfig = createDatafeed(job.getId() + "-datafeed", job.getId(), Collections.singletonList("data"));
DatafeedConfig.Builder datafeedConfigBuilder =
createDatafeedBuilder(job.getId() + "-datafeed", job.getId(), Collections.singletonList("data"));
if (maxEmptySearches != null) {
datafeedConfigBuilder.setMaxEmptySearches(maxEmptySearches);
}
DatafeedConfig datafeedConfig = datafeedConfigBuilder.build();
registerDatafeed(datafeedConfig);
putDatafeed(datafeedConfig);
startDatafeed(datafeedConfig.getId(), 0L, null);
@ -426,9 +461,15 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
assertThat(dataCounts.getOutOfOrderTimeStampCount(), equalTo(0L));
});
long numDocs2 = randomIntBetween(2, 64);
now = System.currentTimeMillis();
indexDocs(logger, "data", numDocs2, now + 5000, now + 6000);
long numDocs2;
if (maxEmptySearches == null) {
numDocs2 = randomIntBetween(2, 64);
indexDocs(logger, "data", numDocs2, now + 5000, now + 6000);
} else {
numDocs2 = 0;
}
assertBusy(() -> {
DataCounts dataCounts = getDataCounts(job.getId());
assertThat(dataCounts.getProcessedRecordCount(), equalTo(numDocs1 + numDocs2));
@ -64,6 +64,7 @@ class DatafeedJob {
private final DatafeedTimingStatsReporter timingStatsReporter;
private final Supplier<Long> currentTimeSupplier;
private final DelayedDataDetector delayedDataDetector;
private final Integer maxEmptySearches;
private volatile long lookbackStartTimeMs;
private volatile long latestFinalBucketEndTimeMs;
@ -73,11 +74,12 @@ class DatafeedJob {
private volatile Long lastEndTimeMs;
private AtomicBoolean running = new AtomicBoolean(true);
private volatile boolean isIsolated;
private volatile boolean haveEverSeenData;
DatafeedJob(String jobId, DataDescription dataDescription, long frequencyMs, long queryDelayMs,
DataExtractorFactory dataExtractorFactory, DatafeedTimingStatsReporter timingStatsReporter, Client client,
AnomalyDetectionAuditor auditor, Supplier<Long> currentTimeSupplier, DelayedDataDetector delayedDataDetector,
long latestFinalBucketEndTimeMs, long latestRecordTimeMs) {
Integer maxEmptySearches, long latestFinalBucketEndTimeMs, long latestRecordTimeMs, boolean haveSeenDataPreviously) {
this.jobId = jobId;
this.dataDescription = Objects.requireNonNull(dataDescription);
this.frequencyMs = frequencyMs;
@ -88,11 +90,13 @@ class DatafeedJob {
this.auditor = auditor;
this.currentTimeSupplier = currentTimeSupplier;
this.delayedDataDetector = delayedDataDetector;
this.maxEmptySearches = maxEmptySearches;
this.latestFinalBucketEndTimeMs = latestFinalBucketEndTimeMs;
long lastEndTime = Math.max(latestFinalBucketEndTimeMs, latestRecordTimeMs);
if (lastEndTime > 0) {
lastEndTimeMs = lastEndTime;
}
this.haveEverSeenData = haveSeenDataPreviously;
}
void isolate() {
@ -108,6 +112,10 @@ class DatafeedJob {
return jobId;
}
public Integer getMaxEmptySearches() {
return maxEmptySearches;
}
public void finishReportingTimingStats() {
timingStatsReporter.finishReporting();
}
@ -380,6 +388,7 @@ class DatafeedJob {
break;
}
recordCount += counts.getProcessedRecordCount();
haveEverSeenData |= (recordCount > 0);
if (counts.getLatestRecordTimeStamp() != null) {
lastEndTimeMs = counts.getLatestRecordTimeStamp().getTime();
}
@ -406,7 +415,7 @@ class DatafeedJob {
}
if (recordCount == 0) {
throw new EmptyDataCountException(nextRealtimeTimestamp());
throw new EmptyDataCountException(nextRealtimeTimestamp(), haveEverSeenData);
}
}
@ -509,10 +518,11 @@ class DatafeedJob {
static class EmptyDataCountException extends RuntimeException {
final long nextDelayInMsSinceEpoch;
final boolean haveEverSeenData;
EmptyDataCountException(long nextDelayInMsSinceEpoch) {
EmptyDataCountException(long nextDelayInMsSinceEpoch, boolean haveEverSeenData) {
this.nextDelayInMsSinceEpoch = nextDelayInMsSinceEpoch;
this.haveEverSeenData = haveEverSeenData;
}
}
}
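Condensed, the new state boils down to a single predicate; a model of the stop decision, for illustration only (the real logic is split between this class and DatafeedManager, which follows in this diff):

    // null maxEmptySearches means "never auto-stop"; a datafeed that has ever
    // seen data keeps retrying indefinitely, exactly as before this change.
    static boolean shouldAutoStop(Integer maxEmptySearches, int consecutiveEmptySearches, boolean haveEverSeenData) {
        return haveEverSeenData == false
            && maxEmptySearches != null
            && consecutiveEmptySearches >= maxEmptySearches;
    }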
@ -92,8 +92,10 @@ public class DatafeedJobBuilder {
auditor,
currentTimeSupplier,
delayedDataDetector,
datafeedConfigHolder.get().getMaxEmptySearches(),
context.latestFinalBucketEndMs,
context.latestRecordTimeMs);
context.latestRecordTimeMs,
context.haveSeenDataPreviously);
listener.onResponse(datafeedJob);
};
@ -128,6 +130,7 @@ public class DatafeedJobBuilder {
if (dataCounts.getLatestRecordTimeStamp() != null) {
context.latestRecordTimeMs = dataCounts.getLatestRecordTimeStamp().getTime();
}
context.haveSeenDataPreviously = (dataCounts.getInputRecordCount() > 0);
jobResultsProvider.datafeedTimingStats(jobHolder.get().getId(), datafeedTimingStatsHandler, listener::onFailure);
};
@ -223,6 +226,7 @@ public class DatafeedJobBuilder {
private static class Context {
volatile long latestFinalBucketEndMs = -1L;
volatile long latestRecordTimeMs = -1L;
volatile boolean haveSeenDataPreviously;
volatile DataExtractorFactory dataExtractorFactory;
volatile DatafeedTimingStatsReporter timingStatsReporter;
}
@ -80,7 +80,6 @@ public class DatafeedManager {
clusterService.addListener(taskRunner);
}
public void run(TransportStartDatafeedAction.DatafeedTask task, Consumer<Exception> finishHandler) {
String datafeedId = task.getDatafeedId();
@ -233,7 +232,7 @@ public class DatafeedManager {
long nextDelayInMsSinceEpoch;
try {
nextDelayInMsSinceEpoch = holder.executeRealTime();
holder.problemTracker.reportNoneEmptyCount();
holder.problemTracker.reportNonEmptyDataCount();
} catch (DatafeedJob.ExtractionProblemException e) {
nextDelayInMsSinceEpoch = e.nextDelayInMsSinceEpoch;
holder.problemTracker.reportExtractionProblem(e.getCause().getMessage());
@ -245,8 +244,15 @@ public class DatafeedManager {
return;
}
} catch (DatafeedJob.EmptyDataCountException e) {
int emptyDataCount = holder.problemTracker.reportEmptyDataCount();
if (e.haveEverSeenData == false && holder.shouldStopAfterEmptyData(emptyDataCount)) {
logger.warn("Datafeed for [" + jobId + "] has seen no data in [" + emptyDataCount
+ "] attempts, and never seen any data previously, so stopping...");
// In this case we auto-close the job, as though a lookback-only datafeed stopped
holder.stop("no_data", TimeValue.timeValueSeconds(20), e, true);
return;
}
nextDelayInMsSinceEpoch = e.nextDelayInMsSinceEpoch;
holder.problemTracker.reportEmptyDataCount();
} catch (Exception e) {
logger.error("Unexpected datafeed failure for job [" + jobId + "] stopping...", e);
holder.stop("general_realtime_error", TimeValue.timeValueSeconds(20), e);
@ -303,7 +309,7 @@ public class DatafeedManager {
// To ensure that we wait until lookback / realtime search has completed before we stop the datafeed
private final ReentrantLock datafeedJobLock = new ReentrantLock(true);
private final DatafeedJob datafeedJob;
private final boolean autoCloseJob;
private final boolean defaultAutoCloseJob;
private final ProblemTracker problemTracker;
private final Consumer<Exception> finishHandler;
volatile Scheduler.Cancellable cancellable;
@ -315,11 +321,16 @@ public class DatafeedManager {
this.allocationId = task.getAllocationId();
this.datafeedId = datafeedId;
this.datafeedJob = datafeedJob;
this.autoCloseJob = task.isLookbackOnly();
this.defaultAutoCloseJob = task.isLookbackOnly();
this.problemTracker = problemTracker;
this.finishHandler = finishHandler;
}
boolean shouldStopAfterEmptyData(int emptyDataCount) {
Integer emptyDataCountToStopAt = datafeedJob.getMaxEmptySearches();
return emptyDataCountToStopAt != null && emptyDataCount >= emptyDataCountToStopAt;
}
String getJobId() {
return datafeedJob.getJobId();
}
@ -333,6 +344,10 @@ public class DatafeedManager {
}
public void stop(String source, TimeValue timeout, Exception e) {
stop(source, timeout, e, defaultAutoCloseJob);
}
public void stop(String source, TimeValue timeout, Exception e, boolean autoCloseJob) {
if (isNodeShuttingDown) {
return;
}
@ -74,16 +74,14 @@ class ProblemTracker {
* Updates the tracking of empty data cycles. If the number of consecutive empty data
* cycles reaches {@code EMPTY_DATA_WARN_COUNT}, a warning is reported. The updated count
* of consecutive empty data cycles is returned so that callers can act on it.
*/
public void reportEmptyDataCount() {
if (emptyDataCount < EMPTY_DATA_WARN_COUNT) {
emptyDataCount++;
if (emptyDataCount == EMPTY_DATA_WARN_COUNT) {
auditor.warning(jobId, Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_NO_DATA));
}
public int reportEmptyDataCount() {
if (++emptyDataCount == EMPTY_DATA_WARN_COUNT) {
auditor.warning(jobId, Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_NO_DATA));
}
return emptyDataCount;
}
public void reportNoneEmptyCount() {
public void reportNonEmptyDataCount() {
if (emptyDataCount >= EMPTY_DATA_WARN_COUNT) {
auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_DATA_SEEN_AGAIN));
}
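reportEmptyDataCount now returns the running count of consecutive empty cycles so the caller can react immediately; a short usage sketch of the new contract, as DatafeedManager does earlier in this diff (maxEmptySearches is the nullable per-datafeed limit):

    int emptyDataCount = problemTracker.reportEmptyDataCount(); // increments, may audit a warning, returns the count
    if (maxEmptySearches != null && emptyDataCount >= maxEmptySearches) {
        // the caller may decide to stop the datafeed here
    }
    problemTracker.reportNonEmptyDataCount(); // audits "data seen again" if a warning had been issued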
@ -133,7 +133,7 @@ public class DatafeedJobTests extends ESTestCase {
}
public void testLookBackRunWithEndTime() throws Exception {
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean());
assertNull(datafeedJob.runLookBack(0L, 1000L));
verify(dataExtractorFactory).newExtractor(0L, 1000L);
@ -145,7 +145,7 @@ public class DatafeedJobTests extends ESTestCase {
public void testSetIsolated() throws Exception {
currentTime = 2000L;
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean());
datafeedJob.isolate();
assertNull(datafeedJob.runLookBack(0L, null));
@ -158,7 +158,7 @@ public class DatafeedJobTests extends ESTestCase {
currentTime = 2000L;
long frequencyMs = 1000;
long queryDelayMs = 500;
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, -1, -1);
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, -1, -1, randomBoolean());
long next = datafeedJob.runLookBack(0L, null);
assertEquals(2000 + frequencyMs + queryDelayMs + 100, next);
@ -181,7 +181,7 @@ public class DatafeedJobTests extends ESTestCase {
long frequencyMs = 1000;
long queryDelayMs = 500;
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, latestFinalBucketEndTimeMs, latestRecordTimeMs);
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, latestFinalBucketEndTimeMs, latestRecordTimeMs, true);
long next = datafeedJob.runLookBack(0L, null);
assertEquals(10000 + frequencyMs + queryDelayMs + 100, next);
@ -206,7 +206,7 @@ public class DatafeedJobTests extends ESTestCase {
long frequencyMs = 1000;
long queryDelayMs = 500;
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, latestFinalBucketEndTimeMs, latestRecordTimeMs);
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, latestFinalBucketEndTimeMs, latestRecordTimeMs, true);
datafeedJob.runLookBack(currentTime, null);
// advance time
@ -238,7 +238,7 @@ public class DatafeedJobTests extends ESTestCase {
currentTime = 60000L;
long frequencyMs = 100;
long queryDelayMs = 1000;
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1);
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1, false);
long next = datafeedJob.runRealtime();
assertEquals(currentTime + frequencyMs + 100, next);
@ -344,7 +344,7 @@ public class DatafeedJobTests extends ESTestCase {
public void testEmptyDataCountGivenlookback() throws Exception {
when(dataExtractor.hasNext()).thenReturn(false);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, false);
expectThrows(DatafeedJob.EmptyDataCountException.class, () -> datafeedJob.runLookBack(0L, 1000L));
verify(client, times(1)).execute(same(FlushJobAction.INSTANCE), any());
verify(client, never()).execute(same(PersistJobAction.INSTANCE), any());
@ -355,7 +355,7 @@ public class DatafeedJobTests extends ESTestCase {
when(dataExtractor.hasNext()).thenReturn(true);
when(dataExtractor.next()).thenThrow(new IOException());
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean());
expectThrows(DatafeedJob.ExtractionProblemException.class, () -> datafeedJob.runLookBack(0L, 1000L));
currentTime = 3001;
@ -382,7 +382,7 @@ public class DatafeedJobTests extends ESTestCase {
when(dataExtractor.getEndTime()).thenReturn(1000L);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean());
DatafeedJob.AnalysisProblemException analysisProblemException =
expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runLookBack(0L, 1000L));
assertThat(analysisProblemException.shouldStop, is(false));
@ -411,7 +411,7 @@ public class DatafeedJobTests extends ESTestCase {
when(dataExtractor.getEndTime()).thenReturn(1000L);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1);
DatafeedJob datafeedJob = createDatafeedJob(1000, 500, -1, -1, randomBoolean());
DatafeedJob.AnalysisProblemException analysisProblemException =
expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runLookBack(0L, 1000L));
assertThat(analysisProblemException.shouldStop, is(true));
@ -436,7 +436,7 @@ public class DatafeedJobTests extends ESTestCase {
currentTime = 60000L;
long frequencyMs = 100;
long queryDelayMs = 1000;
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1);
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1, randomBoolean());
DatafeedJob.AnalysisProblemException analysisProblemException =
expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runRealtime());
assertThat(analysisProblemException.shouldStop, is(false));
@ -448,16 +448,17 @@ public class DatafeedJobTests extends ESTestCase {
currentTime = 60000L;
long frequencyMs = 100;
long queryDelayMs = 1000;
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1);
DatafeedJob datafeedJob = createDatafeedJob(frequencyMs, queryDelayMs, 1000, -1, randomBoolean());
DatafeedJob.AnalysisProblemException analysisProblemException =
expectThrows(DatafeedJob.AnalysisProblemException.class, () -> datafeedJob.runRealtime());
assertThat(analysisProblemException.shouldStop, is(true));
}
private DatafeedJob createDatafeedJob(long frequencyMs, long queryDelayMs, long latestFinalBucketEndTimeMs,
long latestRecordTimeMs) {
long latestRecordTimeMs, boolean haveSeenDataPreviously) {
Supplier<Long> currentTimeSupplier = () -> currentTime;
return new DatafeedJob(jobId, dataDescription.build(), frequencyMs, queryDelayMs, dataExtractorFactory, timingStatsReporter,
client, auditor, currentTimeSupplier, delayedDataDetector, latestFinalBucketEndTimeMs, latestRecordTimeMs);
client, auditor, currentTimeSupplier, delayedDataDetector, null, latestFinalBucketEndTimeMs, latestRecordTimeMs,
haveSeenDataPreviously);
}
}
@ -114,6 +114,7 @@ public class DatafeedManagerTests extends ESTestCase {
when(datafeedJob.isRunning()).thenReturn(true);
when(datafeedJob.stop()).thenReturn(true);
when(datafeedJob.getJobId()).thenReturn(job.getId());
when(datafeedJob.getMaxEmptySearches()).thenReturn(null);
DatafeedJobBuilder datafeedJobBuilder = mock(DatafeedJobBuilder.class);
doAnswer(invocationOnMock -> {
@SuppressWarnings("rawtypes")
@ -133,7 +134,7 @@ public class DatafeedManagerTests extends ESTestCase {
}
public void testLookbackOnly_WarnsWhenNoDataIsRetrieved() throws Exception {
when(datafeedJob.runLookBack(0L, 60000L)).thenThrow(new DatafeedJob.EmptyDataCountException(0L));
when(datafeedJob.runLookBack(0L, 60000L)).thenThrow(new DatafeedJob.EmptyDataCountException(0L, false));
Consumer<Exception> handler = mockConsumer();
DatafeedTask task = createDatafeedTask("datafeed_id", 0L, 60000L);
datafeedManager.run(task, handler);
@ -176,8 +177,8 @@ public class DatafeedManagerTests extends ESTestCase {
return mock(Scheduler.ScheduledCancellable.class);
}).when(threadPool).schedule(any(), any(), any());
when(datafeedJob.runLookBack(anyLong(), anyLong())).thenThrow(new DatafeedJob.EmptyDataCountException(0L));
when(datafeedJob.runRealtime()).thenThrow(new DatafeedJob.EmptyDataCountException(0L));
when(datafeedJob.runLookBack(anyLong(), anyLong())).thenThrow(new DatafeedJob.EmptyDataCountException(0L, false));
when(datafeedJob.runRealtime()).thenThrow(new DatafeedJob.EmptyDataCountException(0L, false));
Consumer<Exception> handler = mockConsumer();
DatafeedTask task = createDatafeedTask("datafeed_id", 0L, null);
@ -85,7 +85,7 @@ public class ProblemTrackerTests extends ESTestCase {
for (int i = 0; i < 9; i++) {
problemTracker.reportEmptyDataCount();
}
problemTracker.reportNoneEmptyCount();
problemTracker.reportNonEmptyDataCount();
Mockito.verifyNoMoreInteractions(auditor);
}
@ -94,7 +94,7 @@ public class ProblemTrackerTests extends ESTestCase {
for (int i = 0; i < 10; i++) {
problemTracker.reportEmptyDataCount();
}
problemTracker.reportNoneEmptyCount();
problemTracker.reportNonEmptyDataCount();
verify(auditor).warning("foo", "Datafeed has been retrieving no data for a while");
verify(auditor).info("foo", "Datafeed has started retrieving data again");
@ -30,6 +30,7 @@ import java.util.concurrent.atomic.AtomicReference;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING;
import static org.elasticsearch.xpack.core.XPackSettings.FIPS_MODE_ENABLED;
import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED;
import static org.elasticsearch.xpack.core.XPackSettings.TOKEN_SERVICE_ENABLED_SETTING;
import static org.elasticsearch.xpack.core.XPackSettings.TRANSPORT_SSL_ENABLED;
@ -95,6 +96,7 @@ public class SecurityFeatureSet implements XPackFeatureSet {
Map<String, Object> auditUsage = auditUsage(settings);
Map<String, Object> ipFilterUsage = ipFilterUsage(ipFilter);
Map<String, Object> anonymousUsage = singletonMap("enabled", AnonymousUser.isAnonymousEnabled(settings));
Map<String, Object> fips140Usage = fips140Usage(settings);
final AtomicReference<Map<String, Object>> rolesUsageRef = new AtomicReference<>();
final AtomicReference<Map<String, Object>> roleMappingUsageRef = new AtomicReference<>();
@ -104,7 +106,7 @@ public class SecurityFeatureSet implements XPackFeatureSet {
if (countDown.countDown()) {
listener.onResponse(new SecurityFeatureSetUsage(available(), enabled(), realmsUsageRef.get(), rolesUsageRef.get(),
roleMappingUsageRef.get(), sslUsage, auditUsage, ipFilterUsage, anonymousUsage, tokenServiceUsage,
apiKeyServiceUsage));
apiKeyServiceUsage, fips140Usage));
}
};
@ -184,4 +186,7 @@ public class SecurityFeatureSet implements XPackFeatureSet {
return ipFilter.usageStats();
}
static Map<String, Object> fips140Usage(Settings settings) {
return singletonMap("enabled", FIPS_MODE_ENABLED.get(settings));
}
}
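The flag is read straight from settings, so the reported value mirrors xpack.security.fips_mode.enabled; a small round-trip sketch using the same key the tests below use:

    Settings settings = Settings.builder()
        .put("xpack.security.fips_mode.enabled", true)
        .build();
    // fips140Usage is the static helper added above.
    Map<String, Object> usage = SecurityFeatureSet.fips140Usage(settings);
    assertEquals(true, usage.get("enabled"));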
@ -149,6 +149,10 @@ public class SecurityFeatureSetTests extends ESTestCase {
if (anonymousEnabled) {
settings.put(AnonymousUser.ROLES_SETTING.getKey(), "foo");
}
final boolean fips140Enabled = randomBoolean();
if (fips140Enabled) {
settings.put("xpack.security.fips_mode.enabled", true);
}
SecurityFeatureSet featureSet = new SecurityFeatureSet(settings.build(), licenseState,
realms, rolesStore, roleMappingStore, ipFilter);
@ -216,6 +220,9 @@ public class SecurityFeatureSetTests extends ESTestCase {
// anonymous
assertThat(source.getValue("anonymous.enabled"), is(anonymousEnabled));
// FIPS 140
assertThat(source.getValue("fips_140.enabled"), is(fips140Enabled));
} else {
assertThat(source.getValue("realms"), is(nullValue()));
assertThat(source.getValue("ssl"), is(nullValue()));
@ -181,8 +181,10 @@ setup:
"indexes":["index-foo"],
"scroll_size": 2000,
"frequency": "1m",
"query_delay": "30s"
"query_delay": "30s",
"max_empty_searches": 42
}
- match: { max_empty_searches: 42 }
- do:
ml.update_datafeed:
@ -192,7 +194,8 @@ setup:
"indexes":["index-*"],
"scroll_size": 10000,
"frequency": "2m",
"query_delay": "0s"
"query_delay": "0s",
"max_empty_searches": -1
}
- match: { datafeed_id: "test-datafeed-1" }
- match: { job_id: "datafeeds-crud-1" }
@ -200,6 +203,7 @@ setup:
- match: { scroll_size: 10000 }
- match: { frequency: "2m" }
- match: { query_delay: "0s" }
- is_false: max_empty_searches
---
"Test update datafeed to point to different job":
@ -364,7 +368,8 @@ setup:
}
}
}
}
},
"max_empty_searches": -1
}
- do:
ml.get_datafeeds:
@ -374,6 +379,7 @@ setup:
- match: { datafeeds.0.aggregations.histogram_buckets.aggs.@timestamp.max.field: "@timestamp" }
- match: { datafeeds.0.aggregations.histogram_buckets.aggs.bytes_in_avg.avg.field: "system.network.in.bytes" }
- match: { datafeeds.0.aggregations.histogram_buckets.aggs.non_negative_bytes.bucket_script.buckets_path.bytes: "bytes_in_derivative" }
- is_false: max_empty_searches
---
"Test delete datafeed":
@ -46,6 +46,8 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.elasticsearch.xpack.core.transform.TransformMessages.CANNOT_STOP_FAILED_TRANSFORM;
@ -196,6 +198,13 @@ public class TransportStopTransformAction extends TransportTasksAction<Transform
);
return ActionListener.wrap(
response -> {
// If there were failures attempting to stop the tasks, we don't know whether they will actually stop.
// It is better to respond to the user now than to leave them waiting for the persistent task to time out.
if (response.getTaskFailures().isEmpty() == false || response.getNodeFailures().isEmpty() == false) {
RestStatus status = firstNotOKStatus(response.getTaskFailures(), response.getNodeFailures());
listener.onFailure(buildException(response.getTaskFailures(), response.getNodeFailures(), status));
return;
}
// Wait until the persistent task is stopped
// Switch over to Generic threadpool so we don't block the network thread
threadPool.generic().execute(() ->
@ -205,6 +214,46 @@ public class TransportStopTransformAction extends TransportTasksAction<Transform
);
}
static ElasticsearchStatusException buildException(List<TaskOperationFailure> taskOperationFailures,
List<ElasticsearchException> elasticsearchExceptions,
RestStatus status) {
List<Exception> exceptions = Stream.concat(
taskOperationFailures.stream().map(TaskOperationFailure::getCause),
elasticsearchExceptions.stream()).collect(Collectors.toList());
ElasticsearchStatusException elasticsearchStatusException =
new ElasticsearchStatusException(exceptions.get(0).getMessage(), status);
for (int i = 1; i < exceptions.size(); i++) {
elasticsearchStatusException.addSuppressed(exceptions.get(i));
}
return elasticsearchStatusException;
}
static RestStatus firstNotOKStatus(List<TaskOperationFailure> taskOperationFailures, List<ElasticsearchException> exceptions) {
RestStatus status = RestStatus.OK;
for (TaskOperationFailure taskOperationFailure : taskOperationFailures) {
status = taskOperationFailure.getStatus();
if (RestStatus.OK.equals(status) == false) {
break;
}
}
if (status == RestStatus.OK) {
for (ElasticsearchException exception : exceptions) {
// As it stands right now, this will ALWAYS be INTERNAL_SERVER_ERROR:
// FailedNodeException does not override the `status()` method, and the logic in ElasticsearchException
// simply returns INTERNAL_SERVER_ERROR.
status = exception.status();
if (RestStatus.OK.equals(status) == false) {
break;
}
}
}
// If none of the previous exceptions carried a non-OK status, we have an unknown error.
return status == RestStatus.OK ? RestStatus.INTERNAL_SERVER_ERROR : status;
}
private void waitForTransformStopped(Set<String> persistentTaskIds,
TimeValue timeout,
boolean force,
@ -109,7 +109,8 @@ public class TransformPersistentTasksExecutor extends PersistentTasksExecutor<Tr
IndexNameExpressionResolver resolver = new IndexNameExpressionResolver();
String[] indices = resolver.concreteIndexNames(clusterState,
IndicesOptions.lenientExpandOpen(),
TransformInternalIndexConstants.INDEX_NAME_PATTERN);
TransformInternalIndexConstants.INDEX_NAME_PATTERN,
TransformInternalIndexConstants.INDEX_NAME_PATTERN_DEPRECATED);
List<String> unavailableIndices = new ArrayList<>(indices.length);
for (String index : indices) {
IndexRoutingTable routingTable = clusterState.getRoutingTable().index(index);
@ -5,12 +5,15 @@
*/
package org.elasticsearch.xpack.transform.action;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.Version;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.indexing.IndexerState;
import org.elasticsearch.xpack.core.transform.TransformMessages;
@ -18,8 +21,10 @@ import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams;
import org.elasticsearch.xpack.core.transform.transforms.TransformState;
import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import static org.elasticsearch.rest.RestStatus.CONFLICT;
import static org.hamcrest.Matchers.equalTo;
@ -91,4 +96,67 @@ public class TransportStopTransformActionTests extends ESTestCase {
"task has failed")));
}
public void testFirstNotOKStatus() {
List<ElasticsearchException> nodeFailures = new ArrayList<>();
List<TaskOperationFailure> taskOperationFailures = new ArrayList<>();
nodeFailures.add(new ElasticsearchException("nodefailure",
new ElasticsearchStatusException("failure", RestStatus.UNPROCESSABLE_ENTITY)));
taskOperationFailures.add(new TaskOperationFailure("node",
1,
new ElasticsearchStatusException("failure", RestStatus.BAD_REQUEST)));
assertThat(TransportStopTransformAction.firstNotOKStatus(Collections.emptyList(), Collections.emptyList()),
equalTo(RestStatus.INTERNAL_SERVER_ERROR));
assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, Collections.emptyList()),
equalTo(RestStatus.BAD_REQUEST));
assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures, nodeFailures),
equalTo(RestStatus.BAD_REQUEST));
assertThat(TransportStopTransformAction.firstNotOKStatus(taskOperationFailures,
Collections.singletonList(new ElasticsearchException(new ElasticsearchStatusException("not failure", RestStatus.OK)))),
equalTo(RestStatus.BAD_REQUEST));
assertThat(TransportStopTransformAction.firstNotOKStatus(
Collections.singletonList(new TaskOperationFailure(
"node",
1,
new ElasticsearchStatusException("not failure", RestStatus.OK))),
nodeFailures),
equalTo(RestStatus.INTERNAL_SERVER_ERROR));
assertThat(TransportStopTransformAction.firstNotOKStatus(
Collections.emptyList(),
nodeFailures),
equalTo(RestStatus.INTERNAL_SERVER_ERROR));
}
public void testBuildException() {
List<ElasticsearchException> nodeFailures = new ArrayList<>();
List<TaskOperationFailure> taskOperationFailures = new ArrayList<>();
nodeFailures.add(new ElasticsearchException("node failure"));
taskOperationFailures.add(new TaskOperationFailure("node",
1,
new ElasticsearchStatusException("task failure", RestStatus.BAD_REQUEST)));
RestStatus status = CONFLICT;
ElasticsearchStatusException statusException =
TransportStopTransformAction.buildException(taskOperationFailures, nodeFailures, status);
assertThat(statusException.status(), equalTo(status));
assertThat(statusException.getMessage(), equalTo(taskOperationFailures.get(0).getCause().getMessage()));
assertThat(statusException.getSuppressed().length, equalTo(1));
statusException = TransportStopTransformAction.buildException(Collections.emptyList(), nodeFailures, status);
assertThat(statusException.status(), equalTo(status));
assertThat(statusException.getMessage(), equalTo(nodeFailures.get(0).getMessage()));
assertThat(statusException.getSuppressed().length, equalTo(0));
statusException = TransportStopTransformAction.buildException(taskOperationFailures, Collections.emptyList(), status);
assertThat(statusException.status(), equalTo(status));
assertThat(statusException.getMessage(), equalTo(taskOperationFailures.get(0).getCause().getMessage()));
assertThat(statusException.getSuppressed().length, equalTo(0));
}
}
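The tests pin down the division of labour between the two helpers: firstNotOKStatus picks the first meaningful HTTP status (falling back to 500), while buildException promotes the first cause to the exception message and attaches every remaining failure as a suppressed exception. A condensed sketch of the combined behaviour, with values taken from testBuildException above:

    ElasticsearchStatusException ex =
        TransportStopTransformAction.buildException(taskOperationFailures, nodeFailures, CONFLICT);
    assertThat(ex.status(), equalTo(CONFLICT));
    // the message comes from the first task failure's cause...
    assertThat(ex.getMessage(), equalTo(taskOperationFailures.get(0).getCause().getMessage()));
    // ...and the one remaining node failure is preserved as a suppressed exception
    assertThat(ex.getSuppressed().length, equalTo(1));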