Merge branch 'master' into zen2
commit 0e1a12122c
@@ -245,7 +245,7 @@ class VersionPropertiesLoader {
                elasticsearch
            )
        }
        String qualifier = systemProperties.getProperty("build.version_qualifier", "alpha1");
        String qualifier = systemProperties.getProperty("build.version_qualifier", "");
        if (qualifier.isEmpty() == false) {
            if (qualifier.matches("(alpha|beta|rc)\\d+") == false) {
                throw new IllegalStateException("Invalid qualifier: " + qualifier)
@@ -66,7 +66,7 @@ class BuildPlugin implements Plugin<Project> {
    void apply(Project project) {
        if (project.pluginManager.hasPlugin('elasticsearch.standalone-rest-test')) {
              throw new InvalidUserDataException('elasticsearch.standalone-test, '
                      + 'elasticearch.standalone-rest-test, and elasticsearch.build '
                      + 'elasticsearch.standalone-rest-test, and elasticsearch.build '
                      + 'are mutually exclusive')
        }
        final String minimumGradleVersion
@@ -1,5 +1,5 @@
elasticsearch = 7.0.0
lucene = 8.0.0-snapshot-31d7dfe6b1
lucene = 8.0.0-snapshot-6d9c714052

# optional dependencies
spatial4j = 0.7
@@ -1417,6 +1417,38 @@ public class RestHighLevelClient implements Closeable {
            throw new IOException("Unable to parse response body for " + response, e);
        }
    }

    /**
     * Defines a helper method for requests that can 404 and in which case will return an empty Optional
     * otherwise tries to parse the response body
     */
    protected final <Req extends Validatable, Resp> Optional<Resp> performRequestAndParseOptionalEntity(Req request,
            CheckedFunction<Req, Request, IOException> requestConverter,
            RequestOptions options,
            CheckedFunction<XContentParser, Resp, IOException> entityParser
    ) throws IOException {
        Optional<ValidationException> validationException = request.validate();
        if (validationException != null && validationException.isPresent()) {
            throw validationException.get();
        }
        Request req = requestConverter.apply(request);
        req.setOptions(options);
        Response response;
        try {
            response = client.performRequest(req);
        } catch (ResponseException e) {
            if (RestStatus.NOT_FOUND.getStatus() == e.getResponse().getStatusLine().getStatusCode()) {
                return Optional.empty();
            }
            throw parseResponseException(e);
        }

        try {
            return Optional.of(parseEntity(response.getEntity(), entityParser));
        } catch (Exception e) {
            throw new IOException("Unable to parse response body for " + response, e);
        }
    }

    /**
     * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
@@ -1538,6 +1570,62 @@ public class RestHighLevelClient implements Closeable {
            }
        };
    }

    /**
     * Async request which returns empty Optionals in the case of 404s or parses entity into an Optional
     */
    protected final <Req extends Validatable, Resp> void performRequestAsyncAndParseOptionalEntity(Req request,
            CheckedFunction<Req, Request, IOException> requestConverter,
            RequestOptions options,
            CheckedFunction<XContentParser, Resp, IOException> entityParser,
            ActionListener<Optional<Resp>> listener) {
        Optional<ValidationException> validationException = request.validate();
        if (validationException != null && validationException.isPresent()) {
            listener.onFailure(validationException.get());
            return;
        }
        Request req;
        try {
            req = requestConverter.apply(request);
        } catch (Exception e) {
            listener.onFailure(e);
            return;
        }
        req.setOptions(options);
        ResponseListener responseListener = wrapResponseListener404sOptional(response -> parseEntity(response.getEntity(),
                entityParser), listener);
        client.performRequestAsync(req, responseListener);
    }

    final <Resp> ResponseListener wrapResponseListener404sOptional(CheckedFunction<Response, Resp, IOException> responseConverter,
            ActionListener<Optional<Resp>> actionListener) {
        return new ResponseListener() {
            @Override
            public void onSuccess(Response response) {
                try {
                    actionListener.onResponse(Optional.of(responseConverter.apply(response)));
                } catch (Exception e) {
                    IOException ioe = new IOException("Unable to parse response body for " + response, e);
                    onFailure(ioe);
                }
            }

            @Override
            public void onFailure(Exception exception) {
                if (exception instanceof ResponseException) {
                    ResponseException responseException = (ResponseException) exception;
                    Response response = responseException.getResponse();
                    if (RestStatus.NOT_FOUND.getStatus() == response.getStatusLine().getStatusCode()) {
                        actionListener.onResponse(Optional.empty());
                    } else {
                        actionListener.onFailure(parseResponseException(responseException));
                    }
                } else {
                    actionListener.onFailure(exception);
                }
            }
        };
    }

    /**
     * Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}.
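Together these helpers let a client API treat a 404 as an expected outcome that surfaces as an empty Optional instead of an exception. A minimal caller-side sketch of the synchronous pattern, built on the TasksClient#get method added later in this commit (the wrapper class and method names below are illustrative, not part of the commit):

import java.io.IOException;
import java.util.Optional;

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.tasks.GetTaskRequest;
import org.elasticsearch.client.tasks.GetTaskResponse;

public class GetTaskExample {
    // Looks up a task and prints its description; a 404 from the server arrives as
    // Optional.empty() thanks to performRequestAndParseOptionalEntity above.
    static void printTaskDescription(RestHighLevelClient client, String nodeId, long taskId) throws IOException {
        GetTaskRequest request = new GetTaskRequest(nodeId, taskId);
        Optional<GetTaskResponse> response = client.tasks().get(request, RequestOptions.DEFAULT);
        if (response.isPresent()) {
            System.out.println(response.get().getTaskInfo().getDescription());
        } else {
            System.out.println("Task " + nodeId + ":" + taskId + " was not found (HTTP 404)");
        }
    }
}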
@@ -22,6 +22,8 @@ package org.elasticsearch.client;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.rollup.DeleteRollupJobRequest;
import org.elasticsearch.client.rollup.DeleteRollupJobResponse;
import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest;
import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse;
import org.elasticsearch.client.rollup.GetRollupJobRequest;
import org.elasticsearch.client.rollup.GetRollupJobResponse;
import org.elasticsearch.client.rollup.GetRollupCapsRequest;
@@ -219,4 +221,40 @@ public class RollupClient {
            listener,
            Collections.emptySet());
    }

    /**
     * Get the Rollup Index Capabilities of a rollup index or pattern
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html">
     * the docs</a> for more.
     * @param request the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return the response
     * @throws IOException in case there is a problem sending the request or parsing back the response
     */
    public GetRollupIndexCapsResponse getRollupIndexCapabilities(GetRollupIndexCapsRequest request,
                                                                 RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(request,
            RollupRequestConverters::getRollupIndexCaps,
            options,
            GetRollupIndexCapsResponse::fromXContent,
            Collections.emptySet());
    }

    /**
     * Asynchronously Get the Rollup Index Capabilities of a rollup index or pattern
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/master/rollup-get-rollup-index-caps.html">
     * the docs</a> for more.
     * @param request the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener the listener to be notified upon request completion
     */
    public void getRollupIndexCapabilitiesAsync(GetRollupIndexCapsRequest request, RequestOptions options,
                                                ActionListener<GetRollupIndexCapsResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(request,
            RollupRequestConverters::getRollupIndexCaps,
            options,
            GetRollupIndexCapsResponse::fromXContent,
            listener,
            Collections.emptySet());
    }
}
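A minimal usage sketch for the new getRollupIndexCapabilities call, assuming the RollableIndexCaps and RollupJobCaps client classes referenced elsewhere in this commit; the wrapper class below is illustrative only:

import java.io.IOException;
import java.util.Map;

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest;
import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse;
import org.elasticsearch.client.rollup.RollableIndexCaps;
import org.elasticsearch.client.rollup.RollupJobCaps;

public class RollupIndexCapsExample {
    // Lists every rollup job that wrote into the "rollup" index.
    static void listJobsForRollupIndex(RestHighLevelClient client) throws IOException {
        GetRollupIndexCapsRequest request = new GetRollupIndexCapsRequest("rollup");
        GetRollupIndexCapsResponse response = client.rollup()
            .getRollupIndexCapabilities(request, RequestOptions.DEFAULT);
        for (Map.Entry<String, RollableIndexCaps> entry : response.getJobs().entrySet()) {
            for (RollupJobCaps jobCaps : entry.getValue().getJobCaps()) {
                System.out.println(entry.getKey() + " <- job " + jobCaps.getJobID());
            }
        }
    }
}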
@@ -24,6 +24,7 @@ import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.client.rollup.DeleteRollupJobRequest;
import org.elasticsearch.client.rollup.GetRollupCapsRequest;
import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest;
import org.elasticsearch.client.rollup.GetRollupJobRequest;
import org.elasticsearch.client.rollup.PutRollupJobRequest;
import org.elasticsearch.client.rollup.StartRollupJobRequest;
@@ -85,4 +86,14 @@ final class RollupRequestConverters {
        request.setEntity(createEntity(getRollupCapsRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }

    static Request getRollupIndexCaps(final GetRollupIndexCapsRequest getRollupIndexCapsRequest) throws IOException {
        String endpoint = new RequestConverters.EndpointBuilder()
            .addCommaSeparatedPathParts(getRollupIndexCapsRequest.indices())
            .addPathPartAsIs("_xpack", "rollup", "data")
            .build();
        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
        request.setEntity(createEntity(getRollupIndexCapsRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }
}
@@ -24,8 +24,11 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.tasks.GetTaskRequest;
import org.elasticsearch.client.tasks.GetTaskResponse;

import java.io.IOException;
import java.util.Optional;

import static java.util.Collections.emptySet;
@@ -67,6 +70,34 @@ public final class TasksClient {
        restHighLevelClient.performRequestAsyncAndParseEntity(request, TasksRequestConverters::listTasks, options,
                ListTasksResponse::fromXContent, listener, emptySet());
    }

    /**
     * Get a task using the Task Management API.
     * See
     * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a>
     * @param request the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return the response
     * @throws IOException in case there is a problem sending the request or parsing back the response
     */
    public Optional<GetTaskResponse> get(GetTaskRequest request, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseOptionalEntity(request, TasksRequestConverters::getTask, options,
                GetTaskResponse::fromXContent);
    }

    /**
     * Get a task using the Task Management API.
     * See
     * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a>
     * @param request the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener an actionlistener that takes an optional response (404s are returned as an empty Optional)
     */
    public void getAsync(GetTaskRequest request, RequestOptions options, ActionListener<Optional<GetTaskResponse>> listener) {

        restHighLevelClient.performRequestAsyncAndParseOptionalEntity(request, TasksRequestConverters::getTask, options,
                GetTaskResponse::fromXContent, listener);
    }

    /**
     * Cancel one or more cluster tasks using the Task Management API.
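The asynchronous variant delivers a 404 to the listener as Optional.empty() rather than through onFailure. A minimal sketch, with an illustrative wrapper class:

import java.util.Optional;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.tasks.GetTaskRequest;
import org.elasticsearch.client.tasks.GetTaskResponse;

public class GetTaskAsyncExample {
    // Fires an asynchronous task lookup; a missing task shows up as an empty Optional.
    static void fetchTask(RestHighLevelClient client, String nodeId, long taskId) {
        GetTaskRequest request = new GetTaskRequest(nodeId, taskId);
        client.tasks().getAsync(request, RequestOptions.DEFAULT, new ActionListener<Optional<GetTaskResponse>>() {
            @Override
            public void onResponse(Optional<GetTaskResponse> response) {
                if (response.isPresent()) {
                    System.out.println("completed: " + response.get().isCompleted());
                } else {
                    System.out.println("task not found");
                }
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        });
    }
}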
@@ -23,6 +23,8 @@ import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.client.RequestConverters.EndpointBuilder;
import org.elasticsearch.client.tasks.GetTaskRequest;

final class TasksRequestConverters {
@@ -54,4 +56,16 @@ final class TasksRequestConverters {
            .putParam("group_by", "none");
        return request;
    }

    static Request getTask(GetTaskRequest getTaskRequest) {
        String endpoint = new EndpointBuilder().addPathPartAsIs("_tasks")
            .addPathPartAsIs(getTaskRequest.getNodeId() + ":" + Long.toString(getTaskRequest.getTaskId()))
            .build();
        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
        RequestConverters.Params params = new RequestConverters.Params(request);
        params.withTimeout(getTaskRequest.getTimeout())
            .withWaitForCompletion(getTaskRequest.getWaitForCompletion());
        return request;
    }

}
@@ -18,10 +18,6 @@
 */
package org.elasticsearch.client.rollup;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
@@ -30,7 +26,7 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class GetRollupCapsResponse implements ToXContentObject {
public class GetRollupCapsResponse {

    private final Map<String, RollableIndexCaps> jobs;

@@ -42,16 +38,6 @@ public class GetRollupCapsResponse implements ToXContentObject {
        return jobs;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        builder.startObject();
        for (Map.Entry<String, RollableIndexCaps> entry : jobs.entrySet()) {
            entry.getValue().toXContent(builder, params);
        }
        builder.endObject();
        return builder;
    }

    public static GetRollupCapsResponse fromXContent(final XContentParser parser) throws IOException {
        Map<String, RollableIndexCaps> jobs = new HashMap<>();
        XContentParser.Token token = parser.nextToken();
@@ -84,9 +70,4 @@ public class GetRollupCapsResponse implements ToXContentObject {
        GetRollupCapsResponse other = (GetRollupCapsResponse) obj;
        return Objects.equals(jobs, other.jobs);
    }

    @Override
    public final String toString() {
        return Strings.toString(this);
    }
}
@@ -0,0 +1,95 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.rollup;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Validatable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;

public class GetRollupIndexCapsRequest implements Validatable, ToXContentObject {
    private static final String INDICES = "indices";
    private static final String INDICES_OPTIONS = "indices_options";

    private String[] indices;
    private IndicesOptions options;

    public GetRollupIndexCapsRequest(final String... indices) {
        this(indices, IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED);
    }

    public GetRollupIndexCapsRequest(final String[] indices, final IndicesOptions options) {
        if (indices == null || indices.length == 0) {
            throw new IllegalArgumentException("[indices] must not be null or empty");
        }
        for (String index : indices) {
            if (Strings.isNullOrEmpty(index)) {
                throw new IllegalArgumentException("[index] must not be null or empty");
            }
        }
        this.indices = indices;
        this.options = Objects.requireNonNull(options);
    }

    public IndicesOptions indicesOptions() {
        return options;
    }

    public String[] indices() {
        return indices;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        {
            builder.array(INDICES, indices);
            builder.startObject(INDICES_OPTIONS);
            {
                options.toXContent(builder, params);
            }
            builder.endObject();
        }
        builder.endObject();
        return builder;
    }

    @Override
    public int hashCode() {
        return Objects.hash(Arrays.hashCode(indices), options);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        GetRollupIndexCapsRequest other = (GetRollupIndexCapsRequest) obj;
        return Arrays.equals(indices, other.indices)
            && Objects.equals(options, other.options);
    }
}
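A small sketch of how this request serializes, assuming Strings.toString(...) to render the same XContent body the client sends; the index pattern is made up for illustration:

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest;
import org.elasticsearch.common.Strings;

public class GetRollupIndexCapsRequestExample {
    public static void main(String[] args) {
        // Index patterns are allowed; strict expand-open / forbid-closed is the default IndicesOptions.
        GetRollupIndexCapsRequest request =
            new GetRollupIndexCapsRequest(new String[]{"rollup-*"}, IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED);

        // Renders the request body: an "indices" array plus a nested "indices_options" object.
        System.out.println(Strings.toString(request));
    }
}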
@@ -0,0 +1,73 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.rollup;

import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

public class GetRollupIndexCapsResponse {

    private final Map<String, RollableIndexCaps> jobs;

    public GetRollupIndexCapsResponse(final Map<String, RollableIndexCaps> jobs) {
        this.jobs = Collections.unmodifiableMap(Objects.requireNonNull(jobs));
    }

    public Map<String, RollableIndexCaps> getJobs() {
        return jobs;
    }

    public static GetRollupIndexCapsResponse fromXContent(final XContentParser parser) throws IOException {
        Map<String, RollableIndexCaps> jobs = new HashMap<>();
        XContentParser.Token token = parser.nextToken();
        if (token.equals(XContentParser.Token.START_OBJECT)) {
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token.equals(XContentParser.Token.FIELD_NAME)) {
                    String pattern = parser.currentName();

                    RollableIndexCaps cap = RollableIndexCaps.PARSER.apply(pattern).apply(parser, null);
                    jobs.put(pattern, cap);
                }
            }
        }
        return new GetRollupIndexCapsResponse(jobs);
    }

    @Override
    public int hashCode() {
        return Objects.hash(jobs);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        GetRollupIndexCapsResponse other = (GetRollupIndexCapsResponse) obj;
        return Objects.equals(jobs, other.jobs);
    }
}
@@ -0,0 +1,108 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.tasks;

import org.elasticsearch.client.Validatable;
import org.elasticsearch.client.ValidationException;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Objects;
import java.util.Optional;

public class GetTaskRequest implements Validatable {
    private final String nodeId;
    private final long taskId;
    private boolean waitForCompletion = false;
    private TimeValue timeout = null;

    public GetTaskRequest(String nodeId, long taskId) {
        this.nodeId = nodeId;
        this.taskId = taskId;
    }

    public String getNodeId() {
        return nodeId;
    }

    public long getTaskId() {
        return taskId;
    }

    /**
     * Should this request wait for all found tasks to complete?
     */
    public boolean getWaitForCompletion() {
        return waitForCompletion;
    }

    /**
     * Should this request wait for all found tasks to complete?
     */
    public GetTaskRequest setWaitForCompletion(boolean waitForCompletion) {
        this.waitForCompletion = waitForCompletion;
        return this;
    }

    /**
     * Timeout to wait for any async actions this request must take. It must take anywhere from 0 to 2 request round trips.
     */
    public TimeValue getTimeout() {
        return timeout;
    }

    /**
     * Timeout to wait for any async actions this request must take.
     */
    public GetTaskRequest setTimeout(TimeValue timeout) {
        this.timeout = timeout;
        return this;
    }

    @Override
    public Optional<ValidationException> validate() {
        final ValidationException validationException = new ValidationException();
        if (timeout != null && !waitForCompletion) {
            validationException.addValidationError("Timeout settings are only accepted if waitForCompletion is also set");
        }
        if (validationException.validationErrors().isEmpty()) {
            return Optional.empty();
        }
        return Optional.of(validationException);
    }

    @Override
    public int hashCode() {
        return Objects.hash(nodeId, taskId, waitForCompletion, timeout);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        GetTaskRequest other = (GetTaskRequest) obj;
        return Objects.equals(nodeId, other.nodeId) &&
            taskId == other.taskId &&
            waitForCompletion == other.waitForCompletion &&
            Objects.equals(timeout, other.timeout);
    }
}
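validate() rejects exactly one combination: a timeout supplied without waitForCompletion. A minimal sketch, with a made-up node id and task id:

import java.util.Optional;

import org.elasticsearch.client.ValidationException;
import org.elasticsearch.client.tasks.GetTaskRequest;
import org.elasticsearch.common.unit.TimeValue;

public class GetTaskRequestValidationExample {
    public static void main(String[] args) {
        // A timeout only makes sense when the request also waits for the task to complete.
        GetTaskRequest invalid = new GetTaskRequest("node-1", 42L)
            .setTimeout(TimeValue.timeValueSeconds(30));
        Optional<ValidationException> error = invalid.validate();
        System.out.println(error.isPresent()); // true: timeout without waitForCompletion

        GetTaskRequest valid = new GetTaskRequest("node-1", 42L)
            .setWaitForCompletion(true)
            .setTimeout(TimeValue.timeValueSeconds(30));
        System.out.println(valid.validate().isPresent()); // false
    }
}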
@@ -0,0 +1,58 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.tasks;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.tasks.TaskInfo;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

public class GetTaskResponse {
    private final boolean completed;
    private final TaskInfo taskInfo;
    public static final ParseField COMPLETED = new ParseField("completed");
    public static final ParseField TASK = new ParseField("task");

    public GetTaskResponse(boolean completed, TaskInfo taskInfo) {
        this.completed = completed;
        this.taskInfo = taskInfo;
    }

    public boolean isCompleted() {
        return completed;
    }

    public TaskInfo getTaskInfo() {
        return taskInfo;
    }

    private static final ConstructingObjectParser<GetTaskResponse, Void> PARSER = new ConstructingObjectParser<>("get_task",
            true, a -> new GetTaskResponse((boolean) a[0], (TaskInfo) a[1]));
    static {
        PARSER.declareBoolean(constructorArg(), COMPLETED);
        PARSER.declareObject(constructorArg(), (p, c) -> TaskInfo.fromXContent(p), TASK);
    }

    public static GetTaskResponse fromXContent(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}
@@ -126,8 +126,11 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
            "parseResponseException",
            "performRequest",
            "performRequestAndParseEntity",
            "performRequestAndParseOptionalEntity",
            "performRequestAsync",
            "performRequestAsyncAndParseEntity"};
            "performRequestAsyncAndParseEntity",
            "performRequestAsyncAndParseOptionalEntity"
            };

        final Set<String> protectedMethods = Arrays.stream(RestHighLevelClient.class.getDeclaredMethods())
            .filter(method -> Modifier.isProtected(method.getModifiers()))
@@ -97,6 +97,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@@ -675,8 +676,7 @@ public class RestHighLevelClientTests extends ESTestCase {
            "indices.put_alias",
            "mtermvectors",
            "render_search_template",
            "scripts_painless_execute",
            "tasks.get"
            "scripts_painless_execute"
        };
        //These API are not required for high-level client feature completeness
        String[] notRequiredApi = new String[] {
@@ -777,8 +777,11 @@ public class RestHighLevelClientTests extends ESTestCase {
                assertThat("the return type for method [" + method + "] is incorrect",
                    method.getReturnType().getSimpleName(), equalTo("boolean"));
            } else {
                assertThat("the return type for method [" + method + "] is incorrect",
                    method.getReturnType().getSimpleName(), endsWith("Response"));
                // It's acceptable for 404s to be represented as empty Optionals
                if (!method.getReturnType().isAssignableFrom(Optional.class)) {
                    assertThat("the return type for method [" + method + "] is incorrect",
                        method.getReturnType().getSimpleName(), endsWith("Response"));
                }
            }

            assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length);
@@ -33,6 +33,8 @@ import org.elasticsearch.client.rollup.DeleteRollupJobRequest;
import org.elasticsearch.client.rollup.DeleteRollupJobResponse;
import org.elasticsearch.client.rollup.GetRollupCapsRequest;
import org.elasticsearch.client.rollup.GetRollupCapsResponse;
import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest;
import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse;
import org.elasticsearch.client.rollup.GetRollupJobRequest;
import org.elasticsearch.client.rollup.GetRollupJobResponse;
import org.elasticsearch.client.rollup.GetRollupJobResponse.IndexerState;
@@ -348,4 +350,116 @@ public class RollupIT extends ESRestHighLevelClientTestCase {
        List<Map<String, Object>> valueCaps = fieldCaps.get("value").getAggs();
        assertThat(valueCaps.size(), equalTo(SUPPORTED_METRICS.size()));
    }

    public void testGetRollupIndexCaps() throws Exception {
        final Set<Integer> values = new HashSet<>();
        double sum = 0.0d;
        int max = Integer.MIN_VALUE;
        int min = Integer.MAX_VALUE;

        final BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
        for (int minute = 0; minute < 60; minute++) {
            for (int second = 0; second < 60; second = second + 10) {
                final int value = randomIntBetween(0, 100);

                final IndexRequest indexRequest = new IndexRequest("docs", "doc");
                indexRequest.source(jsonBuilder()
                    .startObject()
                    .field("value", value)
                    .field("date", String.format(Locale.ROOT, "2018-01-01T00:%02d:%02dZ", minute, second))
                    .endObject());
                bulkRequest.add(indexRequest);

                values.add(value);
                sum += value;
                if (value > max) {
                    max = value;
                }
                if (value < min) {
                    min = value;
                }
            }
        }

        final int numDocs = bulkRequest.numberOfActions();

        BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT);
        assertEquals(RestStatus.OK, bulkResponse.status());
        if (bulkResponse.hasFailures()) {
            for (BulkItemResponse itemResponse : bulkResponse.getItems()) {
                if (itemResponse.isFailed()) {
                    logger.fatal(itemResponse.getFailureMessage());
                }
            }
        }
        assertFalse(bulkResponse.hasFailures());

        RefreshResponse refreshResponse = highLevelClient().indices().refresh(new RefreshRequest("docs"), RequestOptions.DEFAULT);
        assertEquals(0, refreshResponse.getFailedShards());

        final String id = randomAlphaOfLength(10);
        final String indexPattern = randomFrom("docs", "d*", "doc*");
        final String rollupIndex = randomFrom("rollup", "test");
        final String cron = "*/1 * * * * ?";
        final int pageSize = randomIntBetween(numDocs, numDocs * 10);
        // TODO expand this to also test with histogram and terms?
        final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY));
        final List<MetricConfig> metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS));
        final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600));

        PutRollupJobRequest putRollupJobRequest =
            new PutRollupJobRequest(new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout));

        final RollupClient rollupClient = highLevelClient().rollup();
        PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync);
        assertTrue(response.isAcknowledged());

        // wait for the PutJob api to create the index w/ metadata
        highLevelClient().cluster().health(new ClusterHealthRequest(rollupIndex).waitForYellowStatus(), RequestOptions.DEFAULT);

        GetRollupIndexCapsRequest getRollupIndexCapsRequest = new GetRollupIndexCapsRequest(rollupIndex);
        GetRollupIndexCapsResponse capsResponse = highLevelClient().rollup()
            .getRollupIndexCapabilities(getRollupIndexCapsRequest, RequestOptions.DEFAULT);

        assertNotNull(capsResponse);
        Map<String, RollableIndexCaps> rolledPatterns = capsResponse.getJobs();
        assertThat(rolledPatterns.size(), equalTo(1));

        RollableIndexCaps docsPattern = rolledPatterns.get(rollupIndex);
        assertThat(docsPattern.getIndexName(), equalTo(rollupIndex));

        List<RollupJobCaps> rollupJobs = docsPattern.getJobCaps();
        assertThat(rollupJobs.size(), equalTo(1));

        RollupJobCaps jobCaps = rollupJobs.get(0);
        assertThat(jobCaps.getJobID(), equalTo(id));
        assertThat(jobCaps.getRollupIndex(), equalTo(rollupIndex));
        assertThat(jobCaps.getIndexPattern(), equalTo(indexPattern));

        Map<String, RollupJobCaps.RollupFieldCaps> fieldCaps = jobCaps.getFieldCaps();

        List<Map<String, Object>> timestampCaps = fieldCaps.get("date").getAggs();
        for (Map.Entry<String, Object> entry : timestampCaps.get(0).entrySet()) {
            switch (entry.getKey()) {
                case "agg":
                    assertThat(entry.getValue(), equalTo("date_histogram"));
                    break;
                case "delay":
                    assertThat(entry.getValue(), equalTo("foo"));
                    break;
                case "interval":
                    assertThat(entry.getValue(), equalTo("1d"));
                    break;
                case "time_zone":
                    assertThat(entry.getValue(), equalTo("UTC"));
                    break;
                default:
                    fail("Unknown field cap: [" + entry.getKey() + "]");
            }
        }

        List<Map<String, Object>> valueCaps = fieldCaps.get("value").getAggs();
        assertThat(valueCaps.size(), equalTo(SUPPORTED_METRICS.size()));
    }
}
@@ -24,10 +24,21 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.client.tasks.GetTaskRequest;
import org.elasticsearch.client.tasks.GetTaskResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;

import static java.util.Collections.emptyList;
import static org.hamcrest.Matchers.equalTo;
@@ -60,6 +71,56 @@ public class TasksIT extends ESRestHighLevelClientTestCase {
        }
        assertTrue("List tasks were not found", listTasksFound);
    }

    public void testGetValidTask() throws IOException {

        // Run a Reindex to create a task

        final String sourceIndex = "source1";
        final String destinationIndex = "dest";
        Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
        createIndex(sourceIndex, settings);
        createIndex(destinationIndex, settings);
        BulkRequest bulkRequest = new BulkRequest()
            .add(new IndexRequest(sourceIndex, "type", "1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON))
            .add(new IndexRequest(sourceIndex, "type", "2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON))
            .setRefreshPolicy(RefreshPolicy.IMMEDIATE);
        assertEquals(RestStatus.OK, highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status());

        // (need to use low level client because currently high level client
        // doesn't support async return of task id - needs
        // https://github.com/elastic/elasticsearch/pull/35202 )
        RestClient lowClient = highLevelClient().getLowLevelClient();
        Request request = new Request("POST", "_reindex");
        request.addParameter("wait_for_completion", "false");
        request.setJsonEntity("{" + " \"source\": {\n" + " \"index\": \"source1\"\n" + " },\n" + " \"dest\": {\n"
            + " \"index\": \"dest\"\n" + " }" + "}");
        Response response = lowClient.performRequest(request);
        Map<String, Object> map = entityAsMap(response);
        Object taskId = map.get("task");
        assertNotNull(taskId);

        TaskId childTaskId = new TaskId(taskId.toString());
        GetTaskRequest gtr = new GetTaskRequest(childTaskId.getNodeId(), childTaskId.getId());
        gtr.setWaitForCompletion(randomBoolean());
        Optional<GetTaskResponse> getTaskResponse = execute(gtr, highLevelClient().tasks()::get, highLevelClient().tasks()::getAsync);
        assertTrue(getTaskResponse.isPresent());
        GetTaskResponse taskResponse = getTaskResponse.get();
        if (gtr.getWaitForCompletion()) {
            assertTrue(taskResponse.isCompleted());
        }
        TaskInfo info = taskResponse.getTaskInfo();
        assertTrue(info.isCancellable());
        assertEquals("reindex from [source1] to [dest]", info.getDescription());
        assertEquals("indices:data/write/reindex", info.getAction());
    }

    public void testGetInvalidTask() throws IOException {
        // Check 404s are returned as empty Optionals
        GetTaskRequest gtr = new GetTaskRequest("doesNotExistNodeName", 123);
        Optional<GetTaskResponse> getTaskResponse = execute(gtr, highLevelClient().tasks()::get, highLevelClient().tasks()::getAsync);
        assertFalse(getTaskResponse.isPresent());
    }

    public void testCancelTasks() throws IOException {
        ListTasksRequest listRequest = new ListTasksRequest();
@@ -0,0 +1,107 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.core.tasks;

import org.elasticsearch.client.Requests;
import org.elasticsearch.client.tasks.GetTaskResponse;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.RawTaskStatus;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;

public class GetTaskResponseTests extends ESTestCase {

    public void testFromXContent() throws IOException {
        xContentTester(
            this::createParser,
            this::createTestInstance,
            this::toXContent,
            GetTaskResponse::fromXContent)
            .supportsUnknownFields(true)
            .assertEqualsConsumer(this::assertEqualInstances)
            .assertToXContentEquivalence(true)
            .randomFieldsExcludeFilter(field -> field.endsWith("headers") || field.endsWith("status"))
            .test();
    }

    private GetTaskResponse createTestInstance() {
        return new GetTaskResponse(randomBoolean(), randomTaskInfo());
    }

    private void toXContent(GetTaskResponse response, XContentBuilder builder) throws IOException {
        builder.startObject();
        {
            builder.field(GetTaskResponse.COMPLETED.getPreferredName(), response.isCompleted());
            builder.startObject(GetTaskResponse.TASK.getPreferredName());
            response.getTaskInfo().toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();
        }
        builder.endObject();
    }

    private void assertEqualInstances(GetTaskResponse expectedInstance, GetTaskResponse newInstance) {
        assertEquals(expectedInstance.isCompleted(), newInstance.isCompleted());
        assertEquals(expectedInstance.getTaskInfo(), newInstance.getTaskInfo());
    }

    static TaskInfo randomTaskInfo() {
        TaskId taskId = randomTaskId();
        String type = randomAlphaOfLength(5);
        String action = randomAlphaOfLength(5);
        Task.Status status = randomBoolean() ? randomRawTaskStatus() : null;
        String description = randomBoolean() ? randomAlphaOfLength(5) : null;
        long startTime = randomLong();
        long runningTimeNanos = randomLong();
        boolean cancellable = randomBoolean();
        TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId();
        Map<String, String> headers = randomBoolean() ?
            Collections.emptyMap() :
            Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5));
        return new TaskInfo(taskId, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId, headers);
    }

    private static TaskId randomTaskId() {
        return new TaskId(randomAlphaOfLength(5), randomLong());
    }

    private static RawTaskStatus randomRawTaskStatus() {
        try (XContentBuilder builder = XContentBuilder.builder(Requests.INDEX_CONTENT_TYPE.xContent())) {
            builder.startObject();
            int fields = between(0, 10);
            for (int f = 0; f < fields; f++) {
                builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5));
            }
            builder.endObject();
            return new RawTaskStatus(BytesReference.bytes(builder));
        } catch (IOException e) {
            throw new IllegalStateException(e);
        }
    }
}
@@ -38,6 +38,8 @@ import org.elasticsearch.client.rollup.DeleteRollupJobRequest;
import org.elasticsearch.client.rollup.DeleteRollupJobResponse;
import org.elasticsearch.client.rollup.GetRollupCapsRequest;
import org.elasticsearch.client.rollup.GetRollupCapsResponse;
import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest;
import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse;
import org.elasticsearch.client.rollup.GetRollupJobRequest;
import org.elasticsearch.client.rollup.GetRollupJobResponse;
import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper;
@@ -406,6 +408,120 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase {
        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    @SuppressWarnings("unused")
    public void testGetRollupIndexCaps() throws Exception {
        RestHighLevelClient client = highLevelClient();

        DateHistogramGroupConfig dateHistogram =
            new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1>
        TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter");
        HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out");
        GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms);
        List<MetricConfig> metrics = new ArrayList<>(); // <1>
        metrics.add(new MetricConfig("temperature", Arrays.asList("min", "max", "sum")));
        metrics.add(new MetricConfig("voltage", Arrays.asList("avg", "value_count")));

        //tag::x-pack-rollup-get-rollup-index-caps-setup
        final String indexPattern = "docs";
        final String rollupIndexName = "rollup";
        final String cron = "*/1 * * * * ?";
        final int pageSize = 100;
        final TimeValue timeout = null;

        String id = "job_1";
        RollupJobConfig config = new RollupJobConfig(id, indexPattern, rollupIndexName, cron,
            pageSize, groups, metrics, timeout);

        PutRollupJobRequest request = new PutRollupJobRequest(config);
        PutRollupJobResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT);

        boolean acknowledged = response.isAcknowledged();
        //end::x-pack-rollup-get-rollup-index-caps-setup
        assertTrue(acknowledged);

        ClusterHealthRequest healthRequest = new ClusterHealthRequest(config.getRollupIndex()).waitForYellowStatus();
        ClusterHealthResponse healthResponse = client.cluster().health(healthRequest, RequestOptions.DEFAULT);
        assertFalse(healthResponse.isTimedOut());
        assertThat(healthResponse.getStatus(), isOneOf(ClusterHealthStatus.YELLOW, ClusterHealthStatus.GREEN));

        // Now that the job is created, we should have a rollup index with metadata.
        // We can test out the caps API now.

        //tag::x-pack-rollup-get-rollup-index-caps-request
        GetRollupIndexCapsRequest getRollupIndexCapsRequest = new GetRollupIndexCapsRequest("rollup");
        //end::x-pack-rollup-get-rollup-index-caps-request

        //tag::x-pack-rollup-get-rollup-index-caps-execute
        GetRollupIndexCapsResponse capsResponse = client.rollup()
            .getRollupIndexCapabilities(getRollupIndexCapsRequest, RequestOptions.DEFAULT);
        //end::x-pack-rollup-get-rollup-index-caps-execute

        //tag::x-pack-rollup-get-rollup-index-caps-response
        Map<String, RollableIndexCaps> rolledPatterns = capsResponse.getJobs();

        RollableIndexCaps docsPattern = rolledPatterns.get("rollup");

        // indexName will be "rollup", the target index we requested
        String indexName = docsPattern.getIndexName();

        // Each index pattern can have multiple jobs that rolled it up, so `getJobCaps()`
        // returns a list of jobs that rolled up the pattern
        List<RollupJobCaps> rollupJobs = docsPattern.getJobCaps();
        RollupJobCaps jobCaps = rollupJobs.get(0);

        // jobID is the identifier we used when we created the job (e.g. `job1`)
        String jobID = jobCaps.getJobID();

        // rollupIndex is the location that the job stored it's rollup docs (e.g. `rollup`)
        String rollupIndex = jobCaps.getRollupIndex();

        // Finally, fieldCaps are the capabilities of individual fields in the config
        // The key is the field name, and the value is a RollupFieldCaps object which
        // provides more info.
        Map<String, RollupJobCaps.RollupFieldCaps> fieldCaps = jobCaps.getFieldCaps();

        // If we retrieve the "timestamp" field, it returns a list of maps. Each list
        // item represents a different aggregation that can be run against the "timestamp"
        // field, and any additional details specific to that agg (interval, etc)
        List<Map<String, Object>> timestampCaps = fieldCaps.get("timestamp").getAggs();
        assert timestampCaps.get(0).toString().equals("{agg=date_histogram, delay=7d, interval=1h, time_zone=UTC}");

        // In contrast to the timestamp field, the temperature field has multiple aggs configured
        List<Map<String, Object>> temperatureCaps = fieldCaps.get("temperature").getAggs();
        assert temperatureCaps.toString().equals("[{agg=min}, {agg=max}, {agg=sum}]");
        //end::x-pack-rollup-get-rollup-index-caps-response

        assertThat(indexName, equalTo("rollup"));
        assertThat(jobID, equalTo("job_1"));
        assertThat(rollupIndex, equalTo("rollup"));
        assertThat(fieldCaps.size(), equalTo(8));

        // tag::x-pack-rollup-get-rollup-index-caps-execute-listener
        ActionListener<GetRollupIndexCapsResponse> listener = new ActionListener<GetRollupIndexCapsResponse>() {
            @Override
            public void onResponse(GetRollupIndexCapsResponse response) {

                // <1>
            }

            @Override
            public void onFailure(Exception e) {
                // <2>
            }
        };
        // end::x-pack-rollup-get-rollup-index-caps-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::x-pack-rollup-get-rollup-index-caps-execute-async
        client.rollup().getRollupIndexCapabilitiesAsync(getRollupIndexCapsRequest, RequestOptions.DEFAULT, listener); // <1>
        // end::x-pack-rollup-get-rollup-index-caps-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    @SuppressWarnings("unused")
    public void testDeleteRollupJob() throws Exception {
        RestHighLevelClient client = highLevelClient();
@@ -18,52 +18,13 @@
 */
package org.elasticsearch.client.rollup;

import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig;
import org.elasticsearch.client.rollup.job.config.GroupConfig;
import org.elasticsearch.client.rollup.job.config.HistogramGroupConfig;
import org.elasticsearch.client.rollup.job.config.MetricConfig;
import org.elasticsearch.client.rollup.job.config.RollupJobConfig;
import org.elasticsearch.client.rollup.job.config.RollupJobConfigTests;
import org.elasticsearch.client.rollup.job.config.TermsGroupConfig;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import static java.util.Collections.singletonMap;

public class GetRollupCapsResponseTests extends AbstractXContentTestCase<GetRollupCapsResponse> {

    private Map<String, RollableIndexCaps> indices;

    @Before
    private void setupIndices() throws IOException {
        int numIndices = randomIntBetween(1,5);
        indices = new HashMap<>(numIndices);
        for (int i = 0; i < numIndices; i++) {
            String indexName = "index_" + randomAlphaOfLength(10);
            int numJobs = randomIntBetween(1,5);
            List<RollupJobCaps> jobs = new ArrayList<>(numJobs);
            for (int j = 0; j < numJobs; j++) {
                RollupJobConfig config = RollupJobConfigTests.randomRollupJobConfig(randomAlphaOfLength(10));
                jobs.add(new RollupJobCaps(config.getId(), config.getIndexPattern(),
                    config.getRollupIndex(), createRollupFieldCaps(config)));
            }
            RollableIndexCaps cap = new RollableIndexCaps(indexName, jobs);
            indices.put(indexName, cap);
        }
    }
public class GetRollupCapsResponseTests extends RollupCapsResponseTestCase<GetRollupCapsResponse> {

    @Override
    protected GetRollupCapsResponse createTestInstance() {
@@ -71,82 +32,16 @@ public class GetRollupCapsResponseTests extends AbstractXContentTestCase<GetRollupCapsResponse> {
    }

    @Override
    protected boolean supportsUnknownFields() {
        return false;
    protected void toXContent(GetRollupCapsResponse response, XContentBuilder builder) throws IOException {
        builder.startObject();
        for (Map.Entry<String, RollableIndexCaps> entry : response.getJobs().entrySet()) {
            entry.getValue().toXContent(builder, null);
        }
        builder.endObject();
    }

    @Override
    protected GetRollupCapsResponse doParseInstance(final XContentParser parser) throws IOException {
    protected GetRollupCapsResponse fromXContent(XContentParser parser) throws IOException {
        return GetRollupCapsResponse.fromXContent(parser);
    }

    /**
     * Lifted from core's RollupJobCaps, so that we can test without having to include this actual logic in the request
     */
    private static Map<String, RollupJobCaps.RollupFieldCaps> createRollupFieldCaps(final RollupJobConfig rollupJobConfig) {
        final Map<String, List<Map<String, Object>>> tempFieldCaps = new HashMap<>();

        final GroupConfig groupConfig = rollupJobConfig.getGroupConfig();
        if (groupConfig != null) {
            // Create RollupFieldCaps for the date histogram
            final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram();
            final Map<String, Object> dateHistogramAggCap = new HashMap<>();
            dateHistogramAggCap.put("agg", DateHistogramAggregationBuilder.NAME);
            dateHistogramAggCap.put("interval", dateHistogram.getInterval().toString());
            if (dateHistogram.getDelay() != null) {
                dateHistogramAggCap.put("delay", dateHistogram.getDelay().toString());
            }
            dateHistogramAggCap.put("time_zone", dateHistogram.getTimeZone());

            List<Map<String, Object>> dateAggCaps = tempFieldCaps.getOrDefault(dateHistogram.getField(), new ArrayList<>());
            dateAggCaps.add(dateHistogramAggCap);
            tempFieldCaps.put(dateHistogram.getField(), dateAggCaps);

            // Create RollupFieldCaps for the histogram
            final HistogramGroupConfig histogram = groupConfig.getHistogram();
            if (histogram != null) {
                final Map<String, Object> histogramAggCap = new HashMap<>();
                histogramAggCap.put("agg", HistogramAggregationBuilder.NAME);
                histogramAggCap.put("interval", histogram.getInterval());
                Arrays.stream(rollupJobConfig.getGroupConfig().getHistogram().getFields()).forEach(field -> {
                    List<Map<String, Object>> caps = tempFieldCaps.getOrDefault(field, new ArrayList<>());
                    caps.add(histogramAggCap);
                    tempFieldCaps.put(field, caps);
                });
            }

            // Create RollupFieldCaps for the term
            final TermsGroupConfig terms = groupConfig.getTerms();
            if (terms != null) {
                final Map<String, Object> termsAggCap = singletonMap("agg", TermsAggregationBuilder.NAME);
                Arrays.stream(rollupJobConfig.getGroupConfig().getTerms().getFields()).forEach(field -> {
                    List<Map<String, Object>> caps = tempFieldCaps.getOrDefault(field, new ArrayList<>());
                    caps.add(termsAggCap);
                    tempFieldCaps.put(field, caps);
                });
            }
        }

        // Create RollupFieldCaps for the metrics
        final List<MetricConfig> metricsConfig = rollupJobConfig.getMetricsConfig();
        if (metricsConfig.size() > 0) {
            rollupJobConfig.getMetricsConfig().forEach(metricConfig -> {
                final List<Map<String, Object>> metrics = metricConfig.getMetrics().stream()
                    .map(metric -> singletonMap("agg", (Object) metric))
                    .collect(Collectors.toList());
                metrics.forEach(m -> {
                    List<Map<String, Object>> caps = tempFieldCaps
                        .getOrDefault(metricConfig.getField(), new ArrayList<>());
                    caps.add(m);
                    tempFieldCaps.put(metricConfig.getField(), caps);
                });
            });
        }

        return Collections.unmodifiableMap(tempFieldCaps.entrySet()
            .stream()
            .collect(Collectors.toMap(Map.Entry::getKey,
                e -> new RollupJobCaps.RollupFieldCaps(e.getValue()))));
    }

}
@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.rollup;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class GetRollupIndexCapsRequestTests extends ESTestCase {
|
||||
|
||||
public void testNullOrEmptyIndices() {
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new GetRollupIndexCapsRequest((String[]) null));
|
||||
assertThat(e.getMessage(), equalTo("[indices] must not be null or empty"));
|
||||
|
||||
String[] indices = new String[]{};
|
||||
e = expectThrows(IllegalArgumentException.class, () -> new GetRollupIndexCapsRequest(indices));
|
||||
assertThat(e.getMessage(), equalTo("[indices] must not be null or empty"));
|
||||
|
||||
e = expectThrows(IllegalArgumentException.class, () -> new GetRollupIndexCapsRequest(new String[]{"foo", null}));
|
||||
assertThat(e.getMessage(), equalTo("[index] must not be null or empty"));
|
||||
}
|
||||
}
|
@ -0,0 +1,47 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.rollup;
|
||||
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
|
||||
public class GetRollupIndexCapsResponseTests extends RollupCapsResponseTestCase<GetRollupIndexCapsResponse> {
|
||||
|
||||
@Override
|
||||
protected GetRollupIndexCapsResponse createTestInstance() {
|
||||
return new GetRollupIndexCapsResponse(indices);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void toXContent(GetRollupIndexCapsResponse response, XContentBuilder builder) throws IOException {
|
||||
builder.startObject();
|
||||
for (Map.Entry<String, RollableIndexCaps> entry : response.getJobs().entrySet()) {
|
||||
entry.getValue().toXContent(builder, null);
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected GetRollupIndexCapsResponse fromXContent(XContentParser parser) throws IOException {
|
||||
return GetRollupIndexCapsResponse.fromXContent(parser);
|
||||
}
|
||||
}
|
@ -0,0 +1,156 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.rollup;
|
||||
|
||||
import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.GroupConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.HistogramGroupConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.MetricConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.RollupJobConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.RollupJobConfigTests;
|
||||
import org.elasticsearch.client.rollup.job.config.TermsGroupConfig;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
|
||||
|
||||
abstract class RollupCapsResponseTestCase<T> extends ESTestCase {
|
||||
|
||||
protected Map<String, RollableIndexCaps> indices;
|
||||
|
||||
protected abstract T createTestInstance();
|
||||
|
||||
protected abstract void toXContent(T response, XContentBuilder builder) throws IOException;
|
||||
|
||||
protected abstract T fromXContent(XContentParser parser) throws IOException;
|
||||
|
||||
public void testFromXContent() throws IOException {
|
||||
xContentTester(
|
||||
this::createParser,
|
||||
this::createTestInstance,
|
||||
this::toXContent,
|
||||
this::fromXContent)
|
||||
.supportsUnknownFields(false)
|
||||
.randomFieldsExcludeFilter(field ->
|
||||
field.endsWith("job_id"))
|
||||
.test();
|
||||
}
|
||||
|
||||
@Before
|
||||
private void setupIndices() throws IOException {
|
||||
int numIndices = randomIntBetween(1,5);
|
||||
indices = new HashMap<>(numIndices);
|
||||
for (int i = 0; i < numIndices; i++) {
|
||||
String indexName = "index_" + randomAlphaOfLength(10);
|
||||
int numJobs = randomIntBetween(1,5);
|
||||
List<RollupJobCaps> jobs = new ArrayList<>(numJobs);
|
||||
for (int j = 0; j < numJobs; j++) {
|
||||
RollupJobConfig config = RollupJobConfigTests.randomRollupJobConfig(randomAlphaOfLength(10));
|
||||
jobs.add(new RollupJobCaps(config.getId(), config.getIndexPattern(),
|
||||
config.getRollupIndex(), createRollupFieldCaps(config)));
|
||||
}
|
||||
RollableIndexCaps cap = new RollableIndexCaps(indexName, jobs);
|
||||
indices.put(indexName, cap);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Lifted from core's RollupJobCaps, so that we can test without having to include this actual logic in the request
|
||||
*/
|
||||
private static Map<String, RollupJobCaps.RollupFieldCaps> createRollupFieldCaps(final RollupJobConfig rollupJobConfig) {
|
||||
final Map<String, List<Map<String, Object>>> tempFieldCaps = new HashMap<>();
|
||||
|
||||
final GroupConfig groupConfig = rollupJobConfig.getGroupConfig();
|
||||
if (groupConfig != null) {
|
||||
// Create RollupFieldCaps for the date histogram
|
||||
final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram();
|
||||
final Map<String, Object> dateHistogramAggCap = new HashMap<>();
|
||||
dateHistogramAggCap.put("agg", DateHistogramAggregationBuilder.NAME);
|
||||
dateHistogramAggCap.put("interval", dateHistogram.getInterval().toString());
|
||||
if (dateHistogram.getDelay() != null) {
|
||||
dateHistogramAggCap.put("delay", dateHistogram.getDelay().toString());
|
||||
}
|
||||
dateHistogramAggCap.put("time_zone", dateHistogram.getTimeZone());
|
||||
|
||||
List<Map<String, Object>> dateAggCaps = tempFieldCaps.getOrDefault(dateHistogram.getField(), new ArrayList<>());
|
||||
dateAggCaps.add(dateHistogramAggCap);
|
||||
tempFieldCaps.put(dateHistogram.getField(), dateAggCaps);
|
||||
|
||||
// Create RollupFieldCaps for the histogram
|
||||
final HistogramGroupConfig histogram = groupConfig.getHistogram();
|
||||
if (histogram != null) {
|
||||
final Map<String, Object> histogramAggCap = new HashMap<>();
|
||||
histogramAggCap.put("agg", HistogramAggregationBuilder.NAME);
|
||||
histogramAggCap.put("interval", histogram.getInterval());
|
||||
Arrays.stream(rollupJobConfig.getGroupConfig().getHistogram().getFields()).forEach(field -> {
|
||||
List<Map<String, Object>> caps = tempFieldCaps.getOrDefault(field, new ArrayList<>());
|
||||
caps.add(histogramAggCap);
|
||||
tempFieldCaps.put(field, caps);
|
||||
});
|
||||
}
|
||||
|
||||
// Create RollupFieldCaps for the term
|
||||
final TermsGroupConfig terms = groupConfig.getTerms();
|
||||
if (terms != null) {
|
||||
final Map<String, Object> termsAggCap = singletonMap("agg", TermsAggregationBuilder.NAME);
|
||||
Arrays.stream(rollupJobConfig.getGroupConfig().getTerms().getFields()).forEach(field -> {
|
||||
List<Map<String, Object>> caps = tempFieldCaps.getOrDefault(field, new ArrayList<>());
|
||||
caps.add(termsAggCap);
|
||||
tempFieldCaps.put(field, caps);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Create RollupFieldCaps for the metrics
|
||||
final List<MetricConfig> metricsConfig = rollupJobConfig.getMetricsConfig();
|
||||
if (metricsConfig.size() > 0) {
|
||||
rollupJobConfig.getMetricsConfig().forEach(metricConfig -> {
|
||||
final List<Map<String, Object>> metrics = metricConfig.getMetrics().stream()
|
||||
.map(metric -> singletonMap("agg", (Object) metric))
|
||||
.collect(Collectors.toList());
|
||||
metrics.forEach(m -> {
|
||||
List<Map<String, Object>> caps = tempFieldCaps
|
||||
.getOrDefault(metricConfig.getField(), new ArrayList<>());
|
||||
caps.add(m);
|
||||
tempFieldCaps.put(metricConfig.getField(), caps);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
return Collections.unmodifiableMap(tempFieldCaps.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(Map.Entry::getKey,
|
||||
e -> new RollupJobCaps.RollupFieldCaps(e.getValue()))));
|
||||
}
|
||||
}
|
@ -0,0 +1,84 @@
--
:api: rollup-get-rollup-index-caps
:request: GetRollupIndexCapsRequest
:response: GetRollupIndexCapsResponse
--

[id="{upid}-x-pack-{api}"]
=== Get Rollup Index Capabilities API

The Get Rollup Index Capabilities API allows the user to determine if a concrete index or index pattern contains
stored rollup jobs and data. If it contains data stored from rollup jobs, the capabilities of those jobs
are returned. The API accepts a `GetRollupIndexCapsRequest` object as a request and returns a `GetRollupIndexCapsResponse`.

[id="{upid}-x-pack-{api}-request"]
|
||||
==== Get Rollup Index Capabilities Request
|
||||
|
||||
A +{request}+ requires a single parameter: the target index or index pattern (e.g. `rollup-foo`):
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[x-pack-{api}-request]
|
||||
--------------------------------------------------
|
||||
|
||||
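As a rough sketch (separate from the tagged snippet above), the request can also be built directly
from its constructor, which the request tests in this change exercise with one or more index names;
the `rollup-foo` name here is only a placeholder and a varargs `String...` signature is assumed:

["source","java"]
--------------------------------------------------
// Hypothetical usage; "rollup-foo" is a placeholder index name or pattern.
GetRollupIndexCapsRequest request = new GetRollupIndexCapsRequest("rollup-foo");
--------------------------------------------------
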
[id="{upid}-x-pack-{api}-execution"]
|
||||
==== Execution
|
||||
|
||||
The Get Rollup Index Capabilities API can be executed through a `RollupClient`
|
||||
instance. Such instance can be retrieved from a `RestHighLevelClient`
|
||||
using the `rollup()` method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[x-pack-{api}-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
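A minimal sketch of the synchronous call is shown below; the exact method name on `RollupClient` is
an assumption, not something defined in this change:

["source","java"]
--------------------------------------------------
// Assumed method name; adjust to the actual RollupClient API.
GetRollupIndexCapsResponse response =
    client.rollup().getRollupIndexCapabilities(request, RequestOptions.DEFAULT);
--------------------------------------------------
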
[id="{upid}-x-pack-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ holds lists and maps of values which correspond to the capabilities
|
||||
of the rollup index/index pattern (what jobs are stored in the index, their capabilities, what
|
||||
aggregations are available, etc). Because multiple jobs can be stored in one index, the
|
||||
response may include several jobs with different configurations.
|
||||
|
||||
The capabilities are essentially the same as the original job configuration, just presented in a different
|
||||
manner. For example, if we had created a job with the following config:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[x-pack-{api}-setup]
|
||||
--------------------------------------------------
|
||||
|
||||
The +{response}+ object would contain the same information, laid out in a slightly different manner:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[x-pack-{api}-response]
|
||||
--------------------------------------------------
|
||||
|
||||
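Because the response keys its capabilities by index name (the response tests in this change read
them via `getJobs()`, which returns a `Map<String, RollableIndexCaps>`), a short sketch of walking
the result could look like:

["source","java"]
--------------------------------------------------
for (Map.Entry<String, RollableIndexCaps> entry : response.getJobs().entrySet()) {
    String rollupIndex = entry.getKey();          // the index holding rollup data
    RollableIndexCaps caps = entry.getValue();    // jobs and field capabilities for that index
    // inspect caps here
}
--------------------------------------------------
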
[id="{upid}-x-pack-{api}-async"]
|
||||
==== Asynchronous Execution
|
||||
|
||||
This request can be executed asynchronously:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[x-pack-{api}-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The +{request}+ to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for +{response}+ looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[x-pack-{api}-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
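
As a hedged, self-contained illustration (the async method name is assumed, as above), the listener
and the asynchronous call could be wired together like this:

["source","java"]
--------------------------------------------------
ActionListener<GetRollupIndexCapsResponse> listener =
    new ActionListener<GetRollupIndexCapsResponse>() {
        @Override
        public void onResponse(GetRollupIndexCapsResponse response) {
            // handle the returned capabilities, e.g. response.getJobs()
        }

        @Override
        public void onFailure(Exception e) {
            // handle the failure
        }
    };
// Assumed method name; adjust to the actual RollupClient API.
client.rollup().getRollupIndexCapabilitiesAsync(request, RequestOptions.DEFAULT, listener);
--------------------------------------------------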
@ -311,12 +311,14 @@ The Java High Level REST Client supports the following Rollup APIs:
* <<{upid}-rollup-delete-job>>
* <<java-rest-high-x-pack-rollup-get-job>>
* <<{upid}-x-pack-rollup-get-rollup-caps>>
* <<{upid}-x-pack-rollup-get-rollup-index-caps>>

include::rollup/put_job.asciidoc[]
include::rollup/start_job.asciidoc[]
include::rollup/delete_job.asciidoc[]
include::rollup/get_job.asciidoc[]
include::rollup/get_rollup_caps.asciidoc[]
include::rollup/get_rollup_index_caps.asciidoc[]

== Security APIs

@ -208,8 +208,12 @@ The following settings are supported:
|
||||
|
||||
`storage_class`::
|
||||
|
||||
Sets the S3 storage class type for the backup files. Values may be
|
||||
`standard`, `reduced_redundancy`, `standard_ia`. Defaults to `standard`.
|
||||
Sets the S3 storage class for objects stored in the snapshot repository.
|
||||
Values may be `standard`, `reduced_redundancy`, `standard_ia`.
|
||||
Defaults to `standard`. Changing this setting on an existing repository
|
||||
only affects the storage class for newly created objects, resulting in a
|
||||
mixed usage of storage classes. Additionally, S3 Lifecycle Policies can
|
||||
be used to manage the storage class of existing objects.
|
||||
Due to the extra complexity with the Glacier class lifecycle, it is not
|
||||
currently supported by the plugin. For more information about the
|
||||
different classes, see http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS Storage Classes Guide]
|
||||
|
@ -190,6 +190,7 @@ GET /follower_index/_ccr/stats
|
||||
// CONSOLE
|
||||
|
||||
The API returns the following results:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
|
@ -12,12 +12,155 @@ Get {ccr} stats.
|
||||
|
||||
==== Description
|
||||
|
||||
This API gets {ccr} stats.
|
||||
This API gets {ccr} stats. It returns all stats related to {ccr}; in
particular, it returns stats about auto-following, and the same
shard-level stats as in the <<ccr-get-follow-stats,get follower stats API>>.
|
||||
|
||||
==== Request
|
||||
|
||||
//////////////////////////
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /follower_index/_ccr/follow
|
||||
{
|
||||
"remote_cluster" : "remote_cluster",
|
||||
"leader_index" : "leader_index"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TESTSETUP
|
||||
// TEST[setup:remote_cluster_and_leader_index]
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST /follower_index/_ccr/pause_follow
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEARDOWN
|
||||
|
||||
//////////////////////////
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_ccr/stats
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
==== Results
|
||||
|
||||
This API returns the following information:
|
||||
|
||||
`auto_follow_stats`::
|
||||
(object) an object representing stats for the auto-follow coordinator
|
||||
|
||||
This object consists of the following fields:
|
||||
|
||||
`auto_follow_stats.number_of_failed_follow_indices`::
|
||||
(long) the number of indices that the auto-follow coordinator failed to
|
||||
automatically follow; the causes of recent failures are captured in the logs
|
||||
of the elected master node, and in the
|
||||
`auto_follow_stats.recent_auto_follow_errors` field
|
||||
|
||||
`auto_follow_stats.number_of_failed_remote_cluster_state_requests`::
|
||||
(long) the number of times that the auto-follow coordinator failed to retrieve
|
||||
the cluster state from a remote cluster registered in a collection of
|
||||
auto-follow patterns
|
||||
|
||||
`auto_follow_stats.number_of_successful_follow_indices`::
|
||||
(long) the number of indices that the auto-follow coordinator successfully
|
||||
followed
|
||||
|
||||
`auto_follow_stats.recent_auto_follow_errors`::
|
||||
(array) an array of objects representing failures by the auto-follow
|
||||
coordinator
|
||||
|
||||
`follow_stats`::
|
||||
(object) an object representing shard-level stats for follower indices; refer
|
||||
to the details of the response in the
|
||||
<<ccr-get-follow-stats,get follower stats API>>
|
||||
|
||||
==== Example
|
||||
|
||||
This example retrieves {ccr} stats:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_ccr/stats
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
The API returns the following results:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"auto_follow_stats" : {
|
||||
"number_of_failed_follow_indices" : 0,
|
||||
"number_of_failed_remote_cluster_state_requests" : 0,
|
||||
"number_of_successful_follow_indices" : 1,
|
||||
"recent_auto_follow_errors" : [ ]
|
||||
},
|
||||
"follow_stats" : {
|
||||
"indices" : [
|
||||
{
|
||||
"index" : "follower_index",
|
||||
"shards" : [
|
||||
{
|
||||
"remote_cluster" : "remote_cluster",
|
||||
"leader_index" : "leader_index",
|
||||
"follower_index" : "follower_index",
|
||||
"shard_id" : 0,
|
||||
"leader_global_checkpoint" : 1024,
|
||||
"leader_max_seq_no" : 1536,
|
||||
"follower_global_checkpoint" : 768,
|
||||
"follower_max_seq_no" : 896,
|
||||
"last_requested_seq_no" : 897,
|
||||
"outstanding_read_requests" : 8,
|
||||
"outstanding_write_requests" : 2,
|
||||
"write_buffer_operation_count" : 64,
|
||||
"follower_mapping_version" : 4,
|
||||
"follower_settings_version" : 2,
|
||||
"total_read_time_millis" : 32768,
|
||||
"total_read_remote_exec_time_millis" : 16384,
|
||||
"successful_read_requests" : 32,
|
||||
"failed_read_requests" : 0,
|
||||
"operations_read" : 896,
|
||||
"bytes_read" : 32768,
|
||||
"total_write_time_millis" : 16384,
|
||||
"write_buffer_size_in_bytes" : 1536,
|
||||
"successful_write_requests" : 16,
|
||||
"failed_write_requests" : 0,
|
||||
"operations_written" : 832,
|
||||
"read_exceptions" : [ ],
|
||||
"time_since_last_read_millis" : 8
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE[s/"number_of_successful_follow_indices" : 1/"number_of_successful_follow_indices" : $body.auto_follow_stats.number_of_successful_follow_indices/]
|
||||
// TESTRESPONSE[s/"leader_global_checkpoint" : 1024/"leader_global_checkpoint" : $body.follow_stats.indices.0.shards.0.leader_global_checkpoint/]
|
||||
// TESTRESPONSE[s/"leader_max_seq_no" : 1536/"leader_max_seq_no" : $body.follow_stats.indices.0.shards.0.leader_max_seq_no/]
|
||||
// TESTRESPONSE[s/"follower_global_checkpoint" : 768/"follower_global_checkpoint" : $body.follow_stats.indices.0.shards.0.follower_global_checkpoint/]
|
||||
// TESTRESPONSE[s/"follower_max_seq_no" : 896/"follower_max_seq_no" : $body.follow_stats.indices.0.shards.0.follower_max_seq_no/]
|
||||
// TESTRESPONSE[s/"last_requested_seq_no" : 897/"last_requested_seq_no" : $body.follow_stats.indices.0.shards.0.last_requested_seq_no/]
|
||||
// TESTRESPONSE[s/"outstanding_read_requests" : 8/"outstanding_read_requests" : $body.follow_stats.indices.0.shards.0.outstanding_read_requests/]
|
||||
// TESTRESPONSE[s/"outstanding_write_requests" : 2/"outstanding_write_requests" : $body.follow_stats.indices.0.shards.0.outstanding_write_requests/]
|
||||
// TESTRESPONSE[s/"write_buffer_operation_count" : 64/"write_buffer_operation_count" : $body.follow_stats.indices.0.shards.0.write_buffer_operation_count/]
|
||||
// TESTRESPONSE[s/"follower_mapping_version" : 4/"follower_mapping_version" : $body.follow_stats.indices.0.shards.0.follower_mapping_version/]
|
||||
// TESTRESPONSE[s/"follower_settings_version" : 2/"follower_settings_version" : $body.follow_stats.indices.0.shards.0.follower_settings_version/]
|
||||
// TESTRESPONSE[s/"total_read_time_millis" : 32768/"total_read_time_millis" : $body.follow_stats.indices.0.shards.0.total_read_time_millis/]
|
||||
// TESTRESPONSE[s/"total_read_remote_exec_time_millis" : 16384/"total_read_remote_exec_time_millis" : $body.follow_stats.indices.0.shards.0.total_read_remote_exec_time_millis/]
|
||||
// TESTRESPONSE[s/"successful_read_requests" : 32/"successful_read_requests" : $body.follow_stats.indices.0.shards.0.successful_read_requests/]
|
||||
// TESTRESPONSE[s/"failed_read_requests" : 0/"failed_read_requests" : $body.follow_stats.indices.0.shards.0.failed_read_requests/]
|
||||
// TESTRESPONSE[s/"operations_read" : 896/"operations_read" : $body.follow_stats.indices.0.shards.0.operations_read/]
|
||||
// TESTRESPONSE[s/"bytes_read" : 32768/"bytes_read" : $body.follow_stats.indices.0.shards.0.bytes_read/]
|
||||
// TESTRESPONSE[s/"total_write_time_millis" : 16384/"total_write_time_millis" : $body.follow_stats.indices.0.shards.0.total_write_time_millis/]
|
||||
// TESTRESPONSE[s/"write_buffer_size_in_bytes" : 1536/"write_buffer_size_in_bytes" : $body.follow_stats.indices.0.shards.0.write_buffer_size_in_bytes/]
|
||||
// TESTRESPONSE[s/"successful_write_requests" : 16/"successful_write_requests" : $body.follow_stats.indices.0.shards.0.successful_write_requests/]
|
||||
// TESTRESPONSE[s/"failed_write_requests" : 0/"failed_write_requests" : $body.follow_stats.indices.0.shards.0.failed_write_requests/]
|
||||
// TESTRESPONSE[s/"operations_written" : 832/"operations_written" : $body.follow_stats.indices.0.shards.0.operations_written/]
|
||||
// TESTRESPONSE[s/"time_since_last_read_millis" : 8/"time_since_last_read_millis" : $body.follow_stats.indices.0.shards.0.time_since_last_read_millis/]
|
||||
|
@ -1,7 +1,329 @@
|
||||
[role="xpack"]
|
||||
[testenv="platinum"]
|
||||
[[ccr-getting-started]]
|
||||
== Getting Started
|
||||
== Getting Started with {ccr}
|
||||
|
||||
beta[]
|
||||
This is the getting started section of the {ccr} docs.
|
||||
|
||||
This getting-started guide for {ccr} shows you how to:
|
||||
|
||||
* <<ccr-getting-started-remote-cluster,Connect a local cluster to a remote
|
||||
cluster>>
|
||||
* <<ccr-getting-started-leader-index,Create a leader index>> in a remote cluster
|
||||
* <<ccr-getting-started-follower-index,Create a follower index>> that replicates
|
||||
a leader index
|
||||
* <<ccr-getting-started-auto-follow,Automatically create follower indices>>
|
||||
|
||||
[float]
|
||||
[[ccr-getting-started-before-you-begin]]
|
||||
=== Before you begin
|
||||
. {stack-gs}/get-started-elastic-stack.html#install-elasticsearch[Install {es}]
|
||||
on your local and remote clusters.
|
||||
|
||||
. Obtain a license that includes the {ccr} features. See
|
||||
https://www.elastic.co/subscriptions[subscriptions] and
|
||||
<<license-management>>.
|
||||
|
||||
. If the Elastic {security-features} are enabled in your local and remote
|
||||
clusters, you need a user that has appropriate authority to perform the steps
|
||||
in this tutorial.
|
||||
+
|
||||
--
|
||||
[[ccr-getting-started-security]]
|
||||
The {ccr} features use cluster privileges and built-in roles to make it easier
|
||||
to control which users have authority to manage {ccr}.
|
||||
|
||||
By default, you can perform all of the steps in this tutorial by
|
||||
using the built-in `elastic` user. However, a password must be set for this user
|
||||
before the user can do anything. For information about how to set that password,
|
||||
see <<security-getting-started>>.
|
||||
|
||||
If you are performing these steps in a production environment, take extra care
|
||||
because the `elastic` user has the `superuser` role and you could inadvertently
|
||||
make significant changes.
|
||||
|
||||
Alternatively, you can assign the appropriate privileges to a user ID of your
|
||||
choice. On the remote cluster that contains the leader index, a user will need
|
||||
the `read_ccr` cluster privilege and `monitor` and `read` privileges on the
|
||||
leader index.
|
||||
|
||||
[source,yml]
|
||||
--------------------------------------------------
|
||||
ccr_user:
|
||||
cluster:
|
||||
- read_ccr
|
||||
indices:
|
||||
- names: [ 'leader-index' ]
|
||||
privileges:
|
||||
- monitor
|
||||
- read
|
||||
--------------------------------------------------
|
||||
|
||||
On the local cluster that contains the follower index, the same user will need
|
||||
the `manage_ccr` cluster privilege and `monitor`, `read`, `write` and
|
||||
`manage_follow_index` privileges on the follower index.
|
||||
|
||||
[source,yml]
|
||||
--------------------------------------------------
|
||||
ccr_user:
|
||||
cluster:
|
||||
- manage_ccr
|
||||
indices:
|
||||
- names: [ 'follower-index' ]
|
||||
privileges:
|
||||
- monitor
|
||||
- read
|
||||
- write
|
||||
- manage_follow_index
|
||||
--------------------------------------------------
|
||||
|
||||
If you are managing
|
||||
<<ccr-getting-started-remote-cluster,connecting to the remote cluster>> via the
|
||||
cluster update settings API, you will also need a user with the `all` cluster
|
||||
privilege.
|
||||
--
|
||||
|
||||
[float]
|
||||
[[ccr-getting-started-remote-cluster]]
|
||||
=== Connecting to a remote cluster
|
||||
|
||||
The {ccr} features require that you
|
||||
{ref}/modules-remote-clusters.html[connect your local cluster to a remote
|
||||
cluster]. In this tutorial, we will connect our local cluster to a remote
|
||||
cluster with the cluster alias `leader`.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /_cluster/settings
|
||||
{
|
||||
"persistent" : {
|
||||
"cluster" : {
|
||||
"remote" : {
|
||||
"leader" : {
|
||||
"seeds" : [
|
||||
"127.0.0.1:9300" <1>
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[setup:host]
|
||||
// TEST[s/127.0.0.1:9300/\${transport_host}/]
|
||||
<1> Specifies the hostname and transport port of a seed node in the remote
|
||||
cluster.
|
||||
|
||||
You can verify that the local cluster is successfully connected to the remote
|
||||
cluster.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_remote/info
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
The API will respond by showing that the local cluster is connected to the
|
||||
remote cluster.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"leader" : {
|
||||
"seeds" : [
|
||||
"127.0.0.1:9300"
|
||||
],
|
||||
"connected" : true, <1>
|
||||
"num_nodes_connected" : 1, <2>
|
||||
"max_connections_per_cluster" : 3,
|
||||
"initial_connect_timeout" : "30s",
|
||||
"skip_unavailable" : false
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
// TEST[s/127.0.0.1:9300/$body.leader.seeds.0/]
|
||||
// TEST[s/"connected" : true/"connected" : $body.leader.connected/]
|
||||
// TEST[s/"num_nodes_connected" : 1/"num_nodes_connected" : $body.leader.num_nodes_connected/]
|
||||
<1> This shows the local cluster is connected to the remote cluster with cluster
|
||||
alias `leader`
|
||||
<2> This shows the number of nodes in the remote cluster the local cluster is
|
||||
connected to.
|
||||
|
||||
[float]
|
||||
[[ccr-getting-started-leader-index]]
|
||||
=== Creating a leader index
|
||||
|
||||
Leader indices require special index settings to ensure that the operations that
|
||||
need to be replicated are available when the
|
||||
follower requests them from the leader. These settings are used to enable soft
|
||||
deletes on the leader index and to control how many soft deletes are retained. A
|
||||
_soft delete_ occurs whenever a document is deleted or updated. Soft deletes can
|
||||
be enabled only on new indices created on or after {es} 6.5.0.
|
||||
|
||||
In the following example, we will create a leader index in the remote cluster:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /server-metrics
|
||||
{
|
||||
"settings" : {
|
||||
"index" : {
|
||||
"number_of_shards" : 1,
|
||||
"number_of_replicas" : 0,
|
||||
"soft_deletes" : {
|
||||
"enabled" : true, <1>
|
||||
"retention" : {
|
||||
"operations" : 1024 <2>
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"mappings" : {
|
||||
"metric" : {
|
||||
"properties" : {
|
||||
"@timestamp" : {
|
||||
"type" : "date"
|
||||
},
|
||||
"accept" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"deny" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"host" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"response" : {
|
||||
"type" : "float"
|
||||
},
|
||||
"service" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"total" : {
|
||||
"type" : "long"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
<1> Enables soft deletes on the leader index.
|
||||
<2> Specifies that up to 1024 soft delete operations will be retained.
|
||||
|
||||
[float]
|
||||
[[ccr-getting-started-follower-index]]
|
||||
=== Creating a follower index
|
||||
|
||||
Follower indices are created with the {ref}/ccr-put-follow.html[create follower
|
||||
API]. When you create a follower index, you must reference the
|
||||
<<ccr-getting-started-remote-cluster,remote cluster>> and the
|
||||
<<ccr-getting-started-leader-index,leader index>> that you created in the remote
|
||||
cluster.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /server-metrics-copy/_ccr/follow
|
||||
{
|
||||
"remote_cluster" : "leader",
|
||||
"leader_index" : "server-metrics"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
//////////////////////////
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"follow_index_created" : true,
|
||||
"follow_index_shards_acked" : true,
|
||||
"index_following_started" : true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
//////////////////////////
|
||||
|
||||
Now when you index documents into your leader index, you will see these
|
||||
documents replicated in the follower index. You can
|
||||
inspect the status of replication using the
|
||||
{ref}/ccr-get-follow-stats.html[get follower stats API].
|
||||
|
||||
//////////////////////////
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST /server-metrics-copy/_ccr/pause_follow
|
||||
|
||||
POST /server-metrics-copy/_close
|
||||
|
||||
POST /server-metrics-copy/_ccr/unfollow
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
//////////////////////////
|
||||
|
||||
[float]
|
||||
[[ccr-getting-started-auto-follow]]
|
||||
=== Automatically create follower indices
|
||||
|
||||
The auto-follow feature in {ccr} is designed for time series use cases where you want
|
||||
to follow new indices that are periodically created in the remote cluster
|
||||
(such as daily Beats indices). Auto-following is configured using the
|
||||
{ref}/ccr-put-auto-follow-pattern.html[create auto-follow pattern API]. With an
|
||||
auto-follow pattern, you reference the
|
||||
<<ccr-getting-started-remote-cluster,remote cluster>> that you connected your
|
||||
local cluster to. You must also specify a collection of patterns that match the
|
||||
indices you want to automatically follow.
|
||||
|
||||
For example:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /_ccr/auto_follow/beats
|
||||
{
|
||||
"remote_cluster" : "leader",
|
||||
"leader_index_patterns" :
|
||||
[
|
||||
"metricbeat-*", <1>
|
||||
"packetbeat-*" <2>
|
||||
],
|
||||
"follow_index_pattern" : "{{leader_index}}-copy" <3>
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
<1> Automatically follow new {metricbeat} indices.
|
||||
<2> Automatically follow new {packetbeat} indices.
|
||||
<3> The name of the follower index is derived from the name of the leader index
|
||||
by adding the suffix `-copy` to the name of the leader index.
|
||||
|
||||
//////////////////////////
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"acknowledged" : true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
|
||||
//////////////////////////
|
||||
|
||||
//////////////////////////
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
DELETE /_ccr/auto_follow/beats
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
//////////////////////////
|
||||
|
@ -65,8 +65,8 @@ Instantiates a job.
|
||||
score are applied, as new data is seen. See <<ml-job-resource>>.
|
||||
|
||||
`results_index_name`::
|
||||
(string) The name of the index in which to store the {ml} results. The default
|
||||
value is `shared`, which corresponds to the index name `.ml-anomalies-shared`.
|
||||
(string) A text string that affects the name of the {ml} results index. The
|
||||
default value is `shared`, which generates an index named `.ml-anomalies-shared`.
|
||||
|
||||
`results_retention_days`::
|
||||
(long) Advanced configuration option. The number of days for which job results
|
||||
|
@ -1 +0,0 @@
|
||||
8db13c6e146c851614c9f862f1eac67431f9b509
|
@ -0,0 +1 @@
|
||||
8f76b85824b273fafa1e25610c3aff66b97b0dd1
|
@ -1 +0,0 @@
|
||||
b474e1a2d7f0172338a08f159849a6c491781d70
|
@ -0,0 +1 @@
|
||||
ee5e4e4341fdde3978b01945bbfaac72a200fa04
|
@ -1 +0,0 @@
|
||||
fc547e69837bcb808f1782bfa35490645bab9cae
|
@ -0,0 +1 @@
|
||||
34dfcdd2e37b62ad01a8bb4fbda66ea6bf513c28
|
@ -1 +0,0 @@
|
||||
e08961a2ec9414947693659ff79bb7e21a410298
|
@ -0,0 +1 @@
|
||||
25f02c3dfee4efbfe74d87558a6bdd0ea8389e12
|
@ -1 +0,0 @@
|
||||
09280919225656c7ce2a14af29666a02bd86c540
|
@ -0,0 +1 @@
|
||||
1023375e89d6340a93c2409c726a881752eb4ac1
|
@ -1 +0,0 @@
|
||||
880f10393cdefff7575fbf5b2ced890666ec81dc
|
@ -0,0 +1 @@
|
||||
70e598154fb5cb3dced5e82de4afcde2009f1755
|
@ -1 +0,0 @@
|
||||
b41451a9d4e30b8a9a14ccdd7553e5796f77cf44
|
@ -0,0 +1 @@
|
||||
e8b4634d426efee1515fc289b4ad67d1c714d14d
|
@ -1 +0,0 @@
|
||||
145fd2c803d682c2cb2d78e6e350e09a09a09ea0
|
@ -0,0 +1 @@
|
||||
9f53e03113ca04c337d678126acf025cfeccff6e
|
@ -25,6 +25,7 @@ import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
@ -36,11 +37,21 @@ public class EvilOsProbeTests extends ESTestCase {
|
||||
public void testOsPrettyName() throws IOException {
|
||||
final OsInfo osInfo = OsProbe.getInstance().osInfo(randomLongBetween(1, 100), randomIntBetween(1, 8));
|
||||
if (Constants.LINUX) {
|
||||
final List<String> lines = Files.readAllLines(PathUtils.get("/etc/os-release"));
|
||||
final List<String> lines;
|
||||
if (Files.exists(PathUtils.get("/etc/os-release"))) {
|
||||
lines = Files.readAllLines(PathUtils.get("/etc/os-release"));
|
||||
} else if (Files.exists(PathUtils.get("/usr/lib/os-release"))) {
|
||||
lines = Files.readAllLines(PathUtils.get("/usr/lib/os-release"));
|
||||
} else {
|
||||
lines = Collections.singletonList(
|
||||
"PRETTY_NAME=\"" + Files.readAllLines(PathUtils.get("/etc/system-release")).get(0) + "\"");
|
||||
}
|
||||
for (final String line : lines) {
|
||||
if (line != null && line.startsWith("PRETTY_NAME=")) {
|
||||
final Matcher matcher = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1").matcher(line);
|
||||
assert matcher.matches() : line;
|
||||
final Matcher matcher = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1").matcher(line.trim());
|
||||
final boolean matches = matcher.matches();
|
||||
assert matches : line;
|
||||
assert matcher.groupCount() == 2 : line;
|
||||
final String prettyName = matcher.group(2);
|
||||
assertThat(osInfo.getPrettyName(), equalTo(prettyName));
|
||||
return;
|
||||
|
@ -1 +0,0 @@
|
||||
6bb87c96d76cdc70be77261d39376613b0a8860c
|
@ -0,0 +1 @@
|
||||
ee88dcf4ea69de2a13df7b76d5524e8fd442f243
|
@ -1 +0,0 @@
|
||||
1b29b3e3b080ec32073c007a1940e5aa7b195316
|
@ -0,0 +1 @@
|
||||
ec090fd8bd804775aa128ccb20467b062b72d625
|
@ -1 +0,0 @@
|
||||
3757a90f73f505d40e6e200d1bacbff897f67548
|
@ -0,0 +1 @@
|
||||
0bba71a2e8bfd1c15db407ff06ee4185a091d5ec
|
@ -1 +0,0 @@
|
||||
c918cc5ac54e5a4dba4740e9e45a93ebd3c95c77
|
@ -0,0 +1 @@
|
||||
fcee5b1586f7c695c65863ca9ee3a8ebe99c3242
|
@ -1 +0,0 @@
|
||||
6cff1fa9ac25c840589d9a39a42ed4629b594cf4
|
@ -0,0 +1 @@
|
||||
0a26a4870e9fddae497be6899fe9a0a2d3002294
|
@ -1 +0,0 @@
|
||||
2a843337e03493ab5f3498b5dd232fa9abb9e765
|
@ -0,0 +1 @@
|
||||
700722c50f8bfcb2d1773b50f43519603961d0ce
|
@ -1 +0,0 @@
|
||||
afda00bbee5fb8b4c36867eabb83267b3b2b8c10
|
@ -0,0 +1 @@
|
||||
9c9657903e4ade7773aaaf76f19d96e2a936e42d
|
@ -1 +0,0 @@
|
||||
a2d8bc6a0486cfa6b4de8c1103017b35c0193544
|
@ -0,0 +1 @@
|
||||
58ce1753cc41dfe445423c4cee42c129576a2ca2
|
@ -1 +0,0 @@
|
||||
79a3b80245a9cf00f24f5d6e298a8e1a887760f1
|
@ -0,0 +1 @@
|
||||
bf1ee7b66f6e6349624d8760c00669480460a55d
|
@ -1 +0,0 @@
|
||||
37c9970ec38f64e7ccecbe17efbabdaabe8da2ea
|
@ -0,0 +1 @@
|
||||
2ed20db0ccc53f966cc211aeb3b623dcf69d2cca
|
@ -1 +0,0 @@
|
||||
7103c3482c728a9788922aa39e39a5ed2bdd3a11
|
@ -0,0 +1 @@
|
||||
e06d99480f44eede9302fb7dda3c62f3e8ff68e1
|
@ -1 +0,0 @@
|
||||
89d389c1020fac58f462819ad822c9b09e52f563
|
@ -0,0 +1 @@
|
||||
64ff3b354c21fc371cfeef208158af92cdf93316
|
@ -1 +0,0 @@
|
||||
b62e34e522f3afa9c3f1655b97b995ff6ba2592d
|
@ -0,0 +1 @@
|
||||
2dffc0dec40028ca958a0a2fdf0628fd8e8354d0
|
@ -1 +0,0 @@
|
||||
0c92f6b03eb226586b431a834dca90a1f2cd85b8
|
@ -0,0 +1 @@
|
||||
d0ed3d77875bab18abe45706ec8b5d441cf46bdc
|
@ -1 +0,0 @@
|
||||
3a659287ba728f7a0d81694ce32e9ef741a13c19
|
@ -0,0 +1 @@
|
||||
8bb05a98bb9c2615ad1262980dd6b07802bafa1d
|
@ -19,6 +19,7 @@
|
||||
package org.elasticsearch.common.geo.parsers;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.Explicit;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.geo.GeoShapeType;
|
||||
import org.elasticsearch.common.geo.builders.CoordinatesBuilder;
|
||||
@ -77,7 +78,9 @@ public class GeoWKTParser {
|
||||
final GeoShapeFieldMapper shapeMapper)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
try (StringReader reader = new StringReader(parser.text())) {
|
||||
boolean ignoreZValue = (shapeMapper != null && shapeMapper.ignoreZValue().value() == true);
|
||||
Explicit<Boolean> ignoreZValue = (shapeMapper == null) ? GeoShapeFieldMapper.Defaults.IGNORE_Z_VALUE :
|
||||
shapeMapper.ignoreZValue();
|
||||
Explicit<Boolean> coerce = (shapeMapper == null) ? GeoShapeFieldMapper.Defaults.COERCE : shapeMapper.coerce();
|
||||
// setup the tokenizer; configured to read words w/o numbers
|
||||
StreamTokenizer tokenizer = new StreamTokenizer(reader);
|
||||
tokenizer.resetSyntax();
|
||||
@ -90,14 +93,15 @@ public class GeoWKTParser {
|
||||
tokenizer.wordChars('.', '.');
|
||||
tokenizer.whitespaceChars(0, ' ');
|
||||
tokenizer.commentChar('#');
|
||||
ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue);
|
||||
ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue.value(), coerce.value());
|
||||
checkEOF(tokenizer);
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
||||
/** parse geometry from the stream tokenizer */
|
||||
private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue)
|
||||
private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue,
|
||||
final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
final GeoShapeType type = GeoShapeType.forName(nextWord(stream));
|
||||
if (shapeType != null && shapeType != GeoShapeType.GEOMETRYCOLLECTION) {
|
||||
@ -107,21 +111,21 @@ public class GeoWKTParser {
|
||||
}
|
||||
switch (type) {
|
||||
case POINT:
|
||||
return parsePoint(stream, ignoreZValue);
|
||||
return parsePoint(stream, ignoreZValue, coerce);
|
||||
case MULTIPOINT:
|
||||
return parseMultiPoint(stream, ignoreZValue);
|
||||
return parseMultiPoint(stream, ignoreZValue, coerce);
|
||||
case LINESTRING:
|
||||
return parseLine(stream, ignoreZValue);
|
||||
return parseLine(stream, ignoreZValue, coerce);
|
||||
case MULTILINESTRING:
|
||||
return parseMultiLine(stream, ignoreZValue);
|
||||
return parseMultiLine(stream, ignoreZValue, coerce);
|
||||
case POLYGON:
|
||||
return parsePolygon(stream, ignoreZValue);
|
||||
return parsePolygon(stream, ignoreZValue, coerce);
|
||||
case MULTIPOLYGON:
|
||||
return parseMultiPolygon(stream, ignoreZValue);
|
||||
return parseMultiPolygon(stream, ignoreZValue, coerce);
|
||||
case ENVELOPE:
|
||||
return parseBBox(stream);
|
||||
case GEOMETRYCOLLECTION:
|
||||
return parseGeometryCollection(stream, ignoreZValue);
|
||||
return parseGeometryCollection(stream, ignoreZValue, coerce);
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown geometry type: " + type);
|
||||
}
|
||||
@ -142,7 +146,7 @@ public class GeoWKTParser {
|
||||
return new EnvelopeBuilder(new Coordinate(minLon, maxLat), new Coordinate(maxLon, minLat));
|
||||
}
|
||||
|
||||
private static PointBuilder parsePoint(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
private static PointBuilder parsePoint(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
|
||||
return null;
|
||||
@ -155,12 +159,12 @@ public class GeoWKTParser {
|
||||
return pt;
|
||||
}
|
||||
|
||||
private static List<Coordinate> parseCoordinateList(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
private static List<Coordinate> parseCoordinateList(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
CoordinatesBuilder coordinates = new CoordinatesBuilder();
|
||||
boolean isOpenParen = false;
|
||||
if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) {
|
||||
coordinates.coordinate(parseCoordinate(stream, ignoreZValue));
|
||||
coordinates.coordinate(parseCoordinate(stream, ignoreZValue, coerce));
|
||||
}
|
||||
|
||||
if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) {
|
||||
@ -170,7 +174,7 @@ public class GeoWKTParser {
|
||||
while (nextCloserOrComma(stream).equals(COMMA)) {
|
||||
isOpenParen = false;
|
||||
if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) {
|
||||
coordinates.coordinate(parseCoordinate(stream, ignoreZValue));
|
||||
coordinates.coordinate(parseCoordinate(stream, ignoreZValue, coerce));
|
||||
}
|
||||
if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) {
|
||||
throw new ElasticsearchParseException("expected: " + RPAREN + " but found: " + tokenString(stream), stream.lineno());
|
||||
@ -179,7 +183,7 @@ public class GeoWKTParser {
|
||||
return coordinates.build();
|
||||
}
|
||||
|
||||
private static Coordinate parseCoordinate(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
private static Coordinate parseCoordinate(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
final double lon = nextNumber(stream);
|
||||
final double lat = nextNumber(stream);
|
||||
@ -190,71 +194,98 @@ public class GeoWKTParser {
|
||||
return z == null ? new Coordinate(lon, lat) : new Coordinate(lon, lat, z);
|
||||
}
|
||||
|
||||
private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
String token = nextEmptyOrOpen(stream);
|
||||
if (token.equals(EMPTY)) {
|
||||
return null;
|
||||
}
|
||||
return new MultiPointBuilder(parseCoordinateList(stream, ignoreZValue));
|
||||
return new MultiPointBuilder(parseCoordinateList(stream, ignoreZValue, coerce));
|
||||
}
|
||||
|
||||
private static LineStringBuilder parseLine(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
private static LineStringBuilder parseLine(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
String token = nextEmptyOrOpen(stream);
|
||||
if (token.equals(EMPTY)) {
|
||||
return null;
|
||||
}
|
||||
return new LineStringBuilder(parseCoordinateList(stream, ignoreZValue));
|
||||
return new LineStringBuilder(parseCoordinateList(stream, ignoreZValue, coerce));
|
||||
}
|
||||
|
||||
private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
// A LinearRing is a closed LineString with 4 or more positions. The first and last positions
|
||||
// are equivalent (they represent equivalent points).
|
||||
private static LineStringBuilder parseLinearRing(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
String token = nextEmptyOrOpen(stream);
|
||||
if (token.equals(EMPTY)) {
|
||||
return null;
|
||||
}
|
||||
List<Coordinate> coordinates = parseCoordinateList(stream, ignoreZValue, coerce);
|
||||
int coordinatesNeeded = coerce ? 3 : 4;
|
||||
if (coordinates.size() >= coordinatesNeeded) {
|
||||
if (!coordinates.get(0).equals(coordinates.get(coordinates.size() - 1))) {
|
||||
if (coerce == true) {
|
||||
coordinates.add(coordinates.get(0));
|
||||
} else {
|
||||
throw new ElasticsearchParseException("invalid LinearRing found (coordinates are not closed)");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (coordinates.size() < 4) {
|
||||
throw new ElasticsearchParseException("invalid number of points in LinearRing (found [{}] - must be >= 4)",
|
||||
coordinates.size());
|
||||
}
|
||||
return new LineStringBuilder(coordinates);
|
||||
}
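// Illustration (not part of this change): with coerce == true a three-point ring such as
// "POLYGON ((0 0, 1 0, 1 1))" is accepted and closed by re-appending the first coordinate,
// whereas with coerce == false the same input is rejected for having fewer than the
// required four points.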
|
||||
|
||||
private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
String token = nextEmptyOrOpen(stream);
|
||||
if (token.equals(EMPTY)) {
|
||||
return null;
|
||||
}
|
||||
MultiLineStringBuilder builder = new MultiLineStringBuilder();
|
||||
builder.linestring(parseLine(stream, ignoreZValue));
|
||||
builder.linestring(parseLine(stream, ignoreZValue, coerce));
|
||||
while (nextCloserOrComma(stream).equals(COMMA)) {
|
||||
builder.linestring(parseLine(stream, ignoreZValue));
|
||||
builder.linestring(parseLine(stream, ignoreZValue, coerce));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
||||
private static PolygonBuilder parsePolygon(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
private static PolygonBuilder parsePolygon(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
|
||||
return null;
|
||||
}
|
||||
PolygonBuilder builder = new PolygonBuilder(parseLine(stream, ignoreZValue), ShapeBuilder.Orientation.RIGHT);
|
||||
PolygonBuilder builder = new PolygonBuilder(parseLinearRing(stream, ignoreZValue, coerce), ShapeBuilder.Orientation.RIGHT);
|
||||
while (nextCloserOrComma(stream).equals(COMMA)) {
|
||||
builder.hole(parseLine(stream, ignoreZValue));
|
||||
builder.hole(parseLinearRing(stream, ignoreZValue, coerce));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
||||
private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
|
||||
return null;
|
||||
}
|
||||
MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream, ignoreZValue));
|
||||
MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream, ignoreZValue, coerce));
|
||||
while (nextCloserOrComma(stream).equals(COMMA)) {
|
||||
builder.polygon(parsePolygon(stream, ignoreZValue));
|
||||
builder.polygon(parsePolygon(stream, ignoreZValue, coerce));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
||||
private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream, final boolean ignoreZValue)
|
||||
private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream, final boolean ignoreZValue,
|
||||
final boolean coerce)
|
||||
throws IOException, ElasticsearchParseException {
|
||||
if (nextEmptyOrOpen(stream).equals(EMPTY)) {
|
||||
return null;
|
||||
}
|
||||
GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape(
|
||||
parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION, ignoreZValue));
|
||||
parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION, ignoreZValue, coerce));
|
||||
while (nextCloserOrComma(stream).equals(COMMA)) {
|
||||
builder.shape(parseGeometry(stream, null, ignoreZValue));
|
||||
builder.shape(parseGeometry(stream, null, ignoreZValue, coerce));
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
|
@ -299,6 +299,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
||||
RemoteClusterService.ENABLE_REMOTE_CLUSTERS,
|
||||
RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS,
|
||||
RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE,
|
||||
RemoteClusterService.REMOTE_CLUSTER_COMPRESS,
|
||||
TransportService.TRACE_LOG_EXCLUDE_SETTING,
|
||||
TransportService.TRACE_LOG_INCLUDE_SETTING,
|
||||
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
|
||||
|
@ -135,7 +135,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
||||
|
||||
public Builder coerce(boolean coerce) {
|
||||
this.coerce = coerce;
|
||||
return builder;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -155,7 +155,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
|
||||
|
||||
public Builder ignoreMalformed(boolean ignoreMalformed) {
|
||||
this.ignoreMalformed = ignoreMalformed;
|
||||
return builder;
|
||||
return this;
|
||||
}
|
||||
|
||||
protected Explicit<Boolean> ignoreMalformed(BuilderContext context) {
|
||||
|
@ -33,10 +33,13 @@ import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class OsProbe {
@ -547,16 +550,13 @@ public class OsProbe {
final Optional<String> maybePrettyNameLine =
prettyNameLines.size() == 1 ? Optional.of(prettyNameLines.get(0)) : Optional.empty();
if (maybePrettyNameLine.isPresent()) {
final String prettyNameLine = maybePrettyNameLine.get();
final String[] prettyNameFields = prettyNameLine.split("=");
assert prettyNameFields.length == 2 : prettyNameLine;
if (prettyNameFields[1].length() >= 3 &&
(prettyNameFields[1].startsWith("\"") && prettyNameFields[1].endsWith("\"")) ||
(prettyNameFields[1].startsWith("'") && prettyNameFields[1].endsWith("'"))) {
return prettyNameFields[1].substring(1, prettyNameFields[1].length() - 1);
} else {
return prettyNameFields[1];
}
// we trim since some OS contain trailing space, for example, Oracle Linux Server 6.9 has a trailing space after the quote
final String trimmedPrettyNameLine = maybePrettyNameLine.get().trim();
final Matcher matcher = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1").matcher(trimmedPrettyNameLine);
final boolean matches = matcher.matches();
assert matches : trimmedPrettyNameLine;
assert matcher.groupCount() == 2 : trimmedPrettyNameLine;
return matcher.group(2);
} else {
return Constants.OS_NAME;
}
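The replacement of the hand-rolled quote stripping with a single regular expression is easier to follow with concrete inputs. A minimal standalone sketch of how the pattern from the hunk above behaves (the sample PRETTY_NAME lines are invented for illustration):

import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class PrettyNameRegexDemo {
    public static void main(String[] args) {
        // Same pattern as in the hunk above: an optional quote, the value, then the same quote again.
        Pattern pattern = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1");
        List<String> samples = Arrays.asList(
            "PRETTY_NAME=\"Oracle Linux Server 6.9\" ",   // double-quoted, with a trailing space
            "PRETTY_NAME='Ubuntu 18.04.1 LTS'",           // single-quoted
            "PRETTY_NAME=Fedora 28 (Server Edition)");    // unquoted
        for (String line : samples) {
            // The production code trims first, which is what makes the trailing-space case match.
            Matcher matcher = pattern.matcher(line.trim());
            if (matcher.matches()) {
                System.out.println(matcher.group(2));     // prints the pretty name without quotes
            }
        }
    }
}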
@ -567,22 +567,33 @@ public class OsProbe {
}

/**
* The lines from {@code /etc/os-release} or {@code /usr/lib/os-release} as a fallback. These file represents identification of the
* underlying operating system. The structure of the file is newlines of key-value pairs of shell-compatible variable assignments.
* The lines from {@code /etc/os-release} or {@code /usr/lib/os-release} as a fallback, with an additional fallback to
* {@code /etc/system-release}. These files represent identification of the underlying operating system. The structure of the file is
* newlines of key-value pairs of shell-compatible variable assignments.
*
* @return the lines from {@code /etc/os-release} or {@code /usr/lib/os-release}
* @throws IOException if an I/O exception occurs reading {@code /etc/os-release} or {@code /usr/lib/os-release}
* @return the lines from {@code /etc/os-release} or {@code /usr/lib/os-release} or {@code /etc/system-release}
* @throws IOException if an I/O exception occurs reading {@code /etc/os-release} or {@code /usr/lib/os-release} or
* {@code /etc/system-release}
*/
@SuppressForbidden(reason = "access /etc/os-release or /usr/lib/os-release")
@SuppressForbidden(reason = "access /etc/os-release or /usr/lib/os-release or /etc/system-release")
List<String> readOsRelease() throws IOException {
final List<String> lines;
if (Files.exists(PathUtils.get("/etc/os-release"))) {
lines = Files.readAllLines(PathUtils.get("/etc/os-release"));
} else {
assert lines != null && lines.isEmpty() == false;
return lines;
} else if (Files.exists(PathUtils.get("/usr/lib/os-release"))) {
lines = Files.readAllLines(PathUtils.get("/usr/lib/os-release"));
assert lines != null && lines.isEmpty() == false;
return lines;
} else if (Files.exists(PathUtils.get("/etc/system-release"))) {
// fallback for older Red Hat-like OS
lines = Files.readAllLines(PathUtils.get("/etc/system-release"));
assert lines != null && lines.size() == 1;
return Collections.singletonList("PRETTY_NAME=\"" + lines.get(0) + "\"");
} else {
return Collections.emptyList();
}
assert lines != null && lines.isEmpty() == false;
return lines;
}

public OsStats osStats() {
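For the new /etc/system-release branch, the single release line is wrapped so that the existing PRETTY_NAME parsing can stay unchanged; for example (an illustrative value, not taken from this diff), a file containing Red Hat Enterprise Linux Server release 6.7 (Santiago) is turned into the synthetic line PRETTY_NAME="Red Hat Enterprise Linux Server release 6.7 (Santiago)" before it reaches the regex shown earlier.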
@ -23,7 +23,7 @@ import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregatio

import java.io.IOException;

public class ParsedReverseNested extends ParsedSingleBucketAggregation implements Nested {
public class ParsedReverseNested extends ParsedSingleBucketAggregation implements ReverseNested {

@Override
public String getType() {
@ -127,6 +127,7 @@ grant {
// OS release on Linux
permission java.io.FilePermission "/etc/os-release", "read";
permission java.io.FilePermission "/usr/lib/os-release", "read";
permission java.io.FilePermission "/etc/system-release", "read";

// io stats on Linux
permission java.io.FilePermission "/proc/self/mountinfo", "read";
@ -318,6 +318,31 @@ public class GeoWKTShapeParserTests extends BaseGeoParsingTestCase {
assertEquals(shapeBuilder.numDimensions(), 3);
}

public void testParseOpenPolygon() throws IOException {
String openPolygon = "POLYGON ((100 5, 100 10, 90 10, 90 5))";

XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(openPolygon);
XContentParser parser = createParser(xContentBuilder);
parser.nextToken();

Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build();

Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath());
final GeoShapeFieldMapper defaultMapperBuilder = new GeoShapeFieldMapper.Builder("test").coerce(false).build(mockBuilderContext);
ElasticsearchParseException exception = expectThrows(ElasticsearchParseException.class,
() -> ShapeParser.parse(parser, defaultMapperBuilder));
assertEquals("invalid LinearRing found (coordinates are not closed)", exception.getMessage());

final GeoShapeFieldMapper coercingMapperBuilder = new GeoShapeFieldMapper.Builder("test").coerce(true).build(mockBuilderContext);
ShapeBuilder<?, ?> shapeBuilder = ShapeParser.parse(parser, coercingMapperBuilder);
assertNotNull(shapeBuilder);
assertEquals("polygon ((100.0 5.0, 100.0 10.0, 90.0 10.0, 90.0 5.0, 100.0 5.0))", shapeBuilder.toWKT());
}

public void testParseSelfCrossingPolygon() throws IOException {
// test self crossing ccw poly not crossing dateline
List<Coordinate> shellCoordinates = new ArrayList<>();
@ -27,6 +27,7 @@ import java.math.BigInteger;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;

import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.anyOf;
@ -55,12 +56,10 @@ public class OsProbeTests extends ESTestCase {
List<String> readOsRelease() throws IOException {
assert Constants.LINUX : Constants.OS_NAME;
if (prettyName != null) {
final String quote = randomFrom("\"", "'", null);
if (quote == null) {
return Arrays.asList("NAME=" + randomAlphaOfLength(16), "PRETTY_NAME=" + prettyName);
} else {
return Arrays.asList("NAME=" + randomAlphaOfLength(16), "PRETTY_NAME=" + quote + prettyName + quote);
}
final String quote = randomFrom("\"", "'", "");
final String space = randomFrom(" ", "");
final String prettyNameLine = String.format(Locale.ROOT, "PRETTY_NAME=%s%s%s%s", quote, prettyName, quote, space);
return Arrays.asList("NAME=" + randomAlphaOfLength(16), prettyNameLine);
} else {
return Collections.singletonList("NAME=" + randomAlphaOfLength(16));
}
@ -22,9 +22,11 @@ package org.elasticsearch.search.aggregations.bucket.nested;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedAggregation;
import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

import java.io.IOException;
import java.util.List;
import java.util.Map;

@ -49,4 +51,10 @@ public class InternalNestedTests extends InternalSingleBucketAggregationTestCase
protected Class<? extends ParsedSingleBucketAggregation> implementationClass() {
return ParsedNested.class;
}

@Override
protected void assertFromXContent(InternalNested aggregation, ParsedAggregation parsedAggregation) throws IOException {
super.assertFromXContent(aggregation, parsedAggregation);
assertTrue(parsedAggregation instanceof Nested);
}
}
@ -22,9 +22,11 @@ package org.elasticsearch.search.aggregations.bucket.nested;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase;
import org.elasticsearch.search.aggregations.ParsedAggregation;
import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

import java.io.IOException;
import java.util.List;
import java.util.Map;

@ -49,4 +51,10 @@ public class InternalReverseNestedTests extends InternalSingleBucketAggregationT
protected Class<? extends ParsedSingleBucketAggregation> implementationClass() {
return ParsedReverseNested.class;
}

@Override
protected void assertFromXContent(InternalReverseNested aggregation, ParsedAggregation parsedAggregation) throws IOException {
super.assertFromXContent(aggregation, parsedAggregation);
assertTrue(parsedAggregation instanceof ReverseNested);
}
}
@ -99,6 +99,7 @@ public class RemoteClusterServiceTests extends ESTestCase {
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING));
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_NODE_ATTRIBUTE));
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE));
assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_CLUSTER_COMPRESS));
}

public void testRemoteClusterSeedSetting() {
@ -28,7 +28,6 @@ import static org.hamcrest.core.Is.is;
public class FollowIndexIT extends ESCCRRestTestCase {

public void testDowngradeRemoteClusterToBasic() throws Exception {
assumeFalse("windows is the worst", Constants.WINDOWS);
if ("follow".equals(targetCluster) == false) {
return;
}
@ -78,27 +77,30 @@ public class FollowIndexIT extends ESCCRRestTestCase {
assertThat(indexExists(index2), is(false));

// parse the logs and ensure that the auto-coordinator skipped coordination on the leader cluster
assertBusy(() -> {
final List<String> lines = Files.readAllLines(PathUtils.get(System.getProperty("log")));
final Iterator<String> it = lines.iterator();
boolean warn = false;
while (it.hasNext()) {
final String line = it.next();
if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " +
"failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) {
warn = true;
break;
// (does not work on windows...)
if (Constants.WINDOWS == false) {
assertBusy(() -> {
final List<String> lines = Files.readAllLines(PathUtils.get(System.getProperty("log")));
final Iterator<String> it = lines.iterator();
boolean warn = false;
while (it.hasNext()) {
final String line = it.next();
if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " +
"failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) {
warn = true;
break;
}
}
}
assertTrue(warn);
assertTrue(it.hasNext());
final String lineAfterWarn = it.next();
assertThat(
lineAfterWarn,
equalTo("org.elasticsearch.ElasticsearchStatusException: " +
"can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " +
"the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]"));
});
assertTrue(warn);
assertTrue(it.hasNext());
final String lineAfterWarn = it.next();
assertThat(
lineAfterWarn,
equalTo("org.elasticsearch.ElasticsearchStatusException: " +
"can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " +
"the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]"));
});
}
});

// Manually following index2 also does not work after the downgrade:
@ -25,7 +25,7 @@ public class RestCcrStatsAction extends BaseRestHandler {

@Override
public String getName() {
return "ccr_auto_follow_stats";
return "ccr_stats";
}

@Override
@ -26,7 +26,7 @@ public class RestFollowStatsAction extends BaseRestHandler {

@Override
public String getName() {
return "ccr_stats";
return "ccr_follower_stats";
}

@Override
@ -30,6 +30,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.Phase;
import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction;
import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction;
import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction;
import org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
import org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep;
import org.junit.Before;
@ -178,6 +179,41 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
assertBusy(() -> assertFalse(indexExists(shrunkenOriginalIndex)));
}

public void testRetryFailedShrinkAction() throws Exception {
int numShards = 6;
int divisor = randomFrom(2, 3, 6);
int expectedFinalShards = numShards / divisor;
String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0));
createNewSingletonPolicy("warm", new ShrinkAction(numShards + randomIntBetween(1, numShards)));
updatePolicy(index, policy);
assertBusy(() -> {
String failedStep = getFailedStepForIndex(index);
assertThat(failedStep, equalTo(ShrinkStep.NAME));
});

// update policy to be correct
createNewSingletonPolicy("warm", new ShrinkAction(expectedFinalShards));
updatePolicy(index, policy);

// retry step
Request retryRequest = new Request("POST", index + "/_ilm/retry");
assertOK(client().performRequest(retryRequest));

// assert corrected policy is picked up and index is shrunken
assertBusy(() -> {
logger.error(explainIndex(index));
assertTrue(indexExists(shrunkenIndex));
assertTrue(aliasExists(shrunkenIndex, index));
Map<String, Object> settings = getOnlyIndexSettings(shrunkenIndex);
assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY));
assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards)));
assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
});
expectThrows(ResponseException.class, this::indexDocument);
}

public void testRolloverAction() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
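The new test drives the ILM retry endpoint whose server-side handling is extended by the TransportRetryAction hunk further down. A minimal sketch of the same call outside the test framework, using the low-level REST client (the host, port, and index name are assumptions for illustration):

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class IlmRetryExample {
    public static void main(String[] args) throws Exception {
        // Assumes a node listening on localhost:9200 and an index whose ILM step has failed.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Same request the test issues: POST <index>/_ilm/retry re-runs the failed step.
            Request retryRequest = new Request("POST", "/my-index/_ilm/retry");
            Response response = client.performRequest(retryRequest);
            System.out.println(response.getStatusLine());
        }
    }
}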
@ -101,7 +101,7 @@ public class ExecuteStepsUpdateTask extends ClusterStateUpdateTask {
return state;
} else {
state = IndexLifecycleRunner.moveClusterStateToNextStep(index, state, currentStep.getKey(),
currentStep.getNextStepKey(), nowSupplier);
currentStep.getNextStepKey(), nowSupplier, false);
}
} else {
// cluster state wait step so evaluate the
@ -125,7 +125,7 @@ public class ExecuteStepsUpdateTask extends ClusterStateUpdateTask {
return state;
} else {
state = IndexLifecycleRunner.moveClusterStateToNextStep(index, state, currentStep.getKey(),
currentStep.getNextStepKey(), nowSupplier);
currentStep.getNextStepKey(), nowSupplier, false);
}
} else {
logger.trace("[{}] condition not met ({}) [{}], returning existing state",
@ -271,11 +271,13 @@ public class IndexLifecycleRunner {
* @param nextStepKey The next step to move the index into
* @param nowSupplier The current-time supplier for updating when steps changed
* @param stepRegistry The steps registry to check a step-key's existence in the index's current policy
* @param forcePhaseDefinitionRefresh When true, step information will be recompiled from the latest version of the
* policy. Otherwise, existing phase definition is used.
* @return The updated cluster state where the index moved to <code>nextStepKey</code>
*/
static ClusterState moveClusterStateToStep(String indexName, ClusterState currentState, StepKey currentStepKey,
StepKey nextStepKey, LongSupplier nowSupplier,
PolicyStepsRegistry stepRegistry) {
PolicyStepsRegistry stepRegistry, boolean forcePhaseDefinitionRefresh) {
IndexMetaData idxMeta = currentState.getMetaData().index(indexName);
Settings indexSettings = idxMeta.getSettings();
String indexPolicySetting = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings);
@ -295,18 +297,19 @@ public class IndexLifecycleRunner {
"] with policy [" + indexPolicySetting + "] does not exist");
}

return IndexLifecycleRunner.moveClusterStateToNextStep(idxMeta.getIndex(), currentState, currentStepKey, nextStepKey, nowSupplier);
return IndexLifecycleRunner.moveClusterStateToNextStep(idxMeta.getIndex(), currentState, currentStepKey,
nextStepKey, nowSupplier, forcePhaseDefinitionRefresh);
}

static ClusterState moveClusterStateToNextStep(Index index, ClusterState clusterState, StepKey currentStep, StepKey nextStep,
LongSupplier nowSupplier) {
LongSupplier nowSupplier, boolean forcePhaseDefinitionRefresh) {
IndexMetaData idxMeta = clusterState.getMetaData().index(index);
IndexLifecycleMetadata ilmMeta = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE);
LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas()
.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings()));
LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta);
LifecycleExecutionState newLifecycleState = moveExecutionStateToNextStep(policyMetadata,
lifecycleState, currentStep, nextStep, nowSupplier);
lifecycleState, currentStep, nextStep, nowSupplier, forcePhaseDefinitionRefresh);
ClusterState.Builder newClusterStateBuilder = newClusterStateWithLifecycleState(index, clusterState, newLifecycleState);

return newClusterStateBuilder.build();
@ -324,7 +327,7 @@ public class IndexLifecycleRunner {
causeXContentBuilder.endObject();
LifecycleExecutionState nextStepState = moveExecutionStateToNextStep(policyMetadata,
LifecycleExecutionState.fromIndexMetadata(idxMeta), currentStep, new StepKey(currentStep.getPhase(),
currentStep.getAction(), ErrorStep.NAME), nowSupplier);
currentStep.getAction(), ErrorStep.NAME), nowSupplier, false);
LifecycleExecutionState.Builder failedState = LifecycleExecutionState.builder(nextStepState);
failedState.setFailedStep(currentStep.getName());
failedState.setStepInfo(BytesReference.bytes(causeXContentBuilder).utf8ToString());
@ -343,9 +346,9 @@ public class IndexLifecycleRunner {
StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState);
String failedStep = lifecycleState.getFailedStep();
if (currentStepKey != null && ErrorStep.NAME.equals(currentStepKey.getName())
&& Strings.isNullOrEmpty(failedStep) == false) {
StepKey nextStepKey = new StepKey(currentStepKey.getPhase(), currentStepKey.getAction(), failedStep);
newState = moveClusterStateToStep(index, currentState, currentStepKey, nextStepKey, nowSupplier, stepRegistry);
newState = moveClusterStateToStep(index, currentState, currentStepKey, nextStepKey, nowSupplier, stepRegistry, true);
} else {
throw new IllegalArgumentException("cannot retry an action for an index ["
+ index + "] that has not encountered an error when running a Lifecycle Policy");
@ -357,7 +360,8 @@ public class IndexLifecycleRunner {
private static LifecycleExecutionState moveExecutionStateToNextStep(LifecyclePolicyMetadata policyMetadata,
LifecycleExecutionState existingState,
StepKey currentStep, StepKey nextStep,
LongSupplier nowSupplier) {
LongSupplier nowSupplier,
boolean forcePhaseDefinitionRefresh) {
long nowAsMillis = nowSupplier.getAsLong();
LifecycleExecutionState.Builder updatedState = LifecycleExecutionState.builder(existingState);
updatedState.setPhase(nextStep.getPhase());
@ -369,7 +373,7 @@ public class IndexLifecycleRunner {
updatedState.setFailedStep(null);
updatedState.setStepInfo(null);

if (currentStep.getPhase().equals(nextStep.getPhase()) == false) {
if (currentStep.getPhase().equals(nextStep.getPhase()) == false || forcePhaseDefinitionRefresh) {
final String newPhaseDefinition;
final Phase nextPhase;
if ("new".equals(nextStep.getPhase()) || TerminalPolicyStep.KEY.equals(nextStep)) {
|
||||
|
||||
public ClusterState moveClusterStateToStep(ClusterState currentState, String indexName, StepKey currentStepKey, StepKey nextStepKey) {
|
||||
return IndexLifecycleRunner.moveClusterStateToStep(indexName, currentState, currentStepKey, nextStepKey,
|
||||
nowSupplier, policyRegistry);
|
||||
nowSupplier, policyRegistry, false);
|
||||
}
|
||||
|
||||
public ClusterState moveClusterStateToFailedStep(ClusterState currentState, String[] indices) {
|
||||
|
@ -68,7 +68,7 @@ public class MoveToNextStepUpdateTask extends ClusterStateUpdateTask {
|
||||
if (policy.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings))
|
||||
&& currentStepKey.equals(IndexLifecycleRunner.getCurrentStepKey(indexILMData))) {
|
||||
logger.trace("moving [{}] to next step ({})", index.getName(), nextStepKey);
|
||||
return IndexLifecycleRunner.moveClusterStateToNextStep(index, currentState, currentStepKey, nextStepKey, nowSupplier);
|
||||
return IndexLifecycleRunner.moveClusterStateToNextStep(index, currentState, currentStepKey, nextStepKey, nowSupplier, false);
|
||||
} else {
|
||||
// either the policy has changed or the step is now
|
||||
// not the same as when we submitted the update task. In
|
||||
|
@ -13,11 +13,14 @@ import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState;
import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey;
import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction;
import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Request;
import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Response;
@ -55,6 +58,22 @@ public class TransportRetryAction extends TransportMasterNodeAction<Request, Res
return indexLifecycleService.moveClusterStateToFailedStep(currentState, request.indices());
}

@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
for (String index : request.indices()) {
IndexMetaData idxMeta = newState.metaData().index(index);
LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta);
StepKey retryStep = new StepKey(lifecycleState.getPhase(), lifecycleState.getAction(), lifecycleState.getStep());
if (idxMeta == null) {
// The index has somehow been deleted - there shouldn't be any opportunity for this to happen, but just in case.
logger.debug("index [" + index + "] has been deleted after moving to step [" +
lifecycleState.getStep() + "], skipping async action check");
return;
}
indexLifecycleService.maybeRunAsyncAction(newState, idxMeta, retryStep);
}
}

@Override
protected Response newResponse(boolean acknowledged) {
return new Response(acknowledged);
@ -668,7 +668,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
.put(LifecycleSettings.LIFECYCLE_NAME, policy.getName()), LifecycleExecutionState.builder().build(), policyMetadatas);
Index index = clusterState.metaData().index(indexName).getIndex();
ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep,
() -> now);
() -> now, false);
assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now);

LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
@ -684,7 +684,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {

clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas);
index = clusterState.metaData().index(indexName).getIndex();
newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now);
newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now, false);
assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now);
}

@ -698,7 +698,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
Collections.emptyList());
Index index = clusterState.metaData().index(indexName).getIndex();
ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep,
() -> now);
() -> now, false);
assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now);

LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
@ -711,7 +711,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {

clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList());
index = clusterState.metaData().index(indexName).getIndex();
newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now);
newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now, false);
assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now);
}

@ -725,7 +725,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
Collections.emptyList());
Index index = clusterState.metaData().index(indexName).getIndex();
ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep,
() -> now);
() -> now, false);
assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now);

LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
@ -737,7 +737,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
}
clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList());
index = clusterState.metaData().index(indexName).getIndex();
newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now);
newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now, false);
assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now);
}

@ -764,7 +764,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas);
Index index = clusterState.metaData().index(indexName).getIndex();
ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey,
nextStepKey, () -> now, stepRegistry);
nextStepKey, () -> now, stepRegistry, false);
assertClusterStateOnNextStep(clusterState, index, currentStepKey, nextStepKey, newClusterState, now);
}

@ -786,7 +786,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList());
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey,
nextStepKey, () -> now, stepRegistry));
nextStepKey, () -> now, stepRegistry, false));
assertThat(exception.getMessage(), equalTo("index [my_index] is not associated with an Index Lifecycle Policy"));
}

@ -809,7 +809,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList());
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, notCurrentStepKey,
nextStepKey, () -> now, stepRegistry));
nextStepKey, () -> now, stepRegistry, false));
assertThat(exception.getMessage(), equalTo("index [my_index] is not on current step " +
"[{\"phase\":\"not_current_phase\",\"action\":\"not_current_action\",\"name\":\"not_current_step\"}]"));
}
@ -832,7 +832,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList());
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey,
nextStepKey, () -> now, stepRegistry));
nextStepKey, () -> now, stepRegistry, false));
assertThat(exception.getMessage(),
equalTo("step [{\"phase\":\"next_phase\",\"action\":\"next_action\",\"name\":\"next_step\"}] " +
"for index [my_index] with policy [my_policy] does not exist"));
@ -866,18 +866,26 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
String[] indices = new String[] { indexName };
String policyName = "my_policy";
long now = randomNonNegativeLong();
StepKey failedStepKey = new StepKey("current_phase", "current_action", "current_step");
StepKey failedStepKey = new StepKey("current_phase", MockAction.NAME, "current_step");
StepKey errorStepKey = new StepKey(failedStepKey.getPhase(), failedStepKey.getAction(), ErrorStep.NAME);
Step step = new MockStep(failedStepKey, null);
LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null);
LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(),
randomNonNegativeLong(), randomNonNegativeLong());

PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step, indexName);
Settings.Builder indexSettingsBuilder = Settings.builder()
.put(LifecycleSettings.LIFECYCLE_NAME, policyName);
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
lifecycleState.setPhase(errorStepKey.getPhase());
lifecycleState.setPhaseTime(now);
lifecycleState.setAction(errorStepKey.getAction());
lifecycleState.setActionTime(now);
lifecycleState.setStep(errorStepKey.getName());
lifecycleState.setStepTime(now);
lifecycleState.setFailedStep(failedStepKey.getName());
ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList());
ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(),
Collections.singletonList(policyMetadata));
Index index = clusterState.metaData().index(indexName).getIndex();
IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now);
ClusterState nextClusterState = runner.moveClusterStateToFailedStep(clusterState, indices);
@ -885,6 +893,41 @@ public class IndexLifecycleRunnerTests extends ESTestCase {
nextClusterState, now);
}

public void testMoveClusterStateToFailedStepWithUnknownStep() {
String indexName = "my_index";
String[] indices = new String[] { indexName };
String policyName = "my_policy";
long now = randomNonNegativeLong();
StepKey failedStepKey = new StepKey("current_phase", MockAction.NAME, "current_step");
StepKey errorStepKey = new StepKey(failedStepKey.getPhase(), failedStepKey.getAction(), ErrorStep.NAME);

StepKey registeredStepKey = new StepKey(randomFrom(failedStepKey.getPhase(), "other"),
MockAction.NAME, "different_step");
Step step = new MockStep(registeredStepKey, null);
LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null);
LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(),
randomNonNegativeLong(), randomNonNegativeLong());

PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step, indexName);
Settings.Builder indexSettingsBuilder = Settings.builder()
.put(LifecycleSettings.LIFECYCLE_NAME, policyName);
LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder();
lifecycleState.setPhase(errorStepKey.getPhase());
lifecycleState.setPhaseTime(now);
lifecycleState.setAction(errorStepKey.getAction());
lifecycleState.setActionTime(now);
lifecycleState.setStep(errorStepKey.getName());
lifecycleState.setStepTime(now);
lifecycleState.setFailedStep(failedStepKey.getName());
ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(),
Collections.singletonList(policyMetadata));
IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now);
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> runner.moveClusterStateToFailedStep(clusterState, indices));
assertThat(exception.getMessage(), equalTo("step [" + failedStepKey
+ "] for index [my_index] with policy [my_policy] does not exist"));
}

public void testMoveClusterStateToFailedStepIndexNotFound() {
String existingIndexName = "my_index";
String invalidIndexName = "does_not_exist";
@ -82,7 +82,7 @@ public class DelimitedFileStructureFinder implements FileStructureFinder {
int lineNumber = lineNumbers.get(index);
Map<String, String> sampleRecord = new LinkedHashMap<>();
Util.filterListToMap(sampleRecord, columnNames,
trimFields ? row.stream().map(String::trim).collect(Collectors.toList()) : row);
trimFields ? row.stream().map(field -> (field == null) ? null : field.trim()).collect(Collectors.toList()) : row);
sampleRecords.add(sampleRecord);
sampleMessages.add(
sampleLines.subList(prevMessageEndLineNumber + 1, lineNumbers.get(index)).stream().collect(Collectors.joining("\n")));
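The one-line change above swaps String::trim for a null-safe mapping so that a row with a missing (null) field no longer breaks the whole sample. A minimal standalone sketch of the difference (the sample data is invented for illustration):

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class NullSafeTrimDemo {
    public static void main(String[] args) {
        List<String> row = Arrays.asList(" a ", null, " b ");

        // row.stream().map(String::trim).collect(Collectors.toList())
        // would throw a NullPointerException on the null field.

        // The null-safe variant from the hunk above keeps null fields as null:
        List<String> trimmed = row.stream()
            .map(field -> (field == null) ? null : field.trim())
            .collect(Collectors.toList());
        System.out.println(trimmed); // [a, null, b]
    }
}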
@ -1 +0,0 @@
3757a90f73f505d40e6e200d1bacbff897f67548
Some files were not shown because too many files have changed in this diff.