diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 2acf1288578..973f0ff429b 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -245,7 +245,7 @@ class VersionPropertiesLoader { elasticsearch ) } - String qualifier = systemProperties.getProperty("build.version_qualifier", "alpha1"); + String qualifier = systemProperties.getProperty("build.version_qualifier", ""); if (qualifier.isEmpty() == false) { if (qualifier.matches("(alpha|beta|rc)\\d+") == false) { throw new IllegalStateException("Invalid qualifier: " + qualifier) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index f5b6e6462bc..126ca0993a5 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -66,7 +66,7 @@ class BuildPlugin implements Plugin { void apply(Project project) { if (project.pluginManager.hasPlugin('elasticsearch.standalone-rest-test')) { throw new InvalidUserDataException('elasticsearch.standalone-test, ' - + 'elasticearch.standalone-rest-test, and elasticsearch.build ' + + 'elasticsearch.standalone-rest-test, and elasticsearch.build ' + 'are mutually exclusive') } final String minimumGradleVersion diff --git a/buildSrc/version.properties b/buildSrc/version.properties index c1d759a5d1c..75e95be2a96 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0 -lucene = 8.0.0-snapshot-31d7dfe6b1 +lucene = 8.0.0-snapshot-6d9c714052 # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 8b740994e3b..09006db1d8e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -1417,6 +1417,38 @@ public class RestHighLevelClient implements Closeable { throw new IOException("Unable to parse response body for " + response, e); } } + + /** + * Defines a helper method for requests that can 404, in which case an empty Optional is returned; + * otherwise the response body is parsed + */ + protected final Optional performRequestAndParseOptionalEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser + ) throws IOException { + Optional validationException = request.validate(); + if (validationException != null && validationException.isPresent()) { + throw validationException.get(); + } + Request req = requestConverter.apply(request); + req.setOptions(options); + Response response; + try { + response = client.performRequest(req); + } catch (ResponseException e) { + if (RestStatus.NOT_FOUND.getStatus() == e.getResponse().getStatusLine().getStatusCode()) { + return Optional.empty(); + } + throw parseResponseException(e); + } + + try { + return Optional.of(parseEntity(response.getEntity(), entityParser)); + } catch (Exception e) { + throw new IOException("Unable to parse response body for " + response, e); + } + } /** * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions.
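For orientation, here is what the new optional-entity helper above looks like from a caller's perspective: a 404 from the server surfaces as Optional.empty() instead of an exception. This is a minimal sketch against the TasksClient API added later in this diff; the `client` variable and the node/task identifiers are hypothetical.

    // Sketch only: "node123" and 42L are hypothetical identifiers and
    // `client` is assumed to be an already-built RestHighLevelClient.
    GetTaskRequest taskRequest = new GetTaskRequest("node123", 42L);
    Optional<GetTaskResponse> maybeTask = client.tasks().get(taskRequest, RequestOptions.DEFAULT);
    if (maybeTask.isPresent()) {
        // 2xx response: the body was parsed into a GetTaskResponse
        System.out.println("completed: " + maybeTask.get().isCompleted());
    } else {
        // 404 response: surfaced as an empty Optional rather than an ElasticsearchException
        System.out.println("no such task");
    }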
The Validation @@ -1538,6 +1570,62 @@ public class RestHighLevelClient implements Closeable { } }; } + + /** + * Async request which returns an empty Optional in the case of a 404, and otherwise parses the entity into an Optional + */ + protected final void performRequestAsyncAndParseOptionalEntity(Req request, + CheckedFunction requestConverter, + RequestOptions options, + CheckedFunction entityParser, + ActionListener> listener) { + Optional validationException = request.validate(); + if (validationException != null && validationException.isPresent()) { + listener.onFailure(validationException.get()); + return; + } + Request req; + try { + req = requestConverter.apply(request); + } catch (Exception e) { + listener.onFailure(e); + return; + } + req.setOptions(options); + ResponseListener responseListener = wrapResponseListener404sOptional(response -> parseEntity(response.getEntity(), + entityParser), listener); + client.performRequestAsync(req, responseListener); + } + + final ResponseListener wrapResponseListener404sOptional(CheckedFunction responseConverter, + ActionListener> actionListener) { + return new ResponseListener() { + @Override + public void onSuccess(Response response) { + try { + actionListener.onResponse(Optional.of(responseConverter.apply(response))); + } catch (Exception e) { + IOException ioe = new IOException("Unable to parse response body for " + response, e); + onFailure(ioe); + } + } + + @Override + public void onFailure(Exception exception) { + if (exception instanceof ResponseException) { + ResponseException responseException = (ResponseException) exception; + Response response = responseException.getResponse(); + if (RestStatus.NOT_FOUND.getStatus() == response.getStatusLine().getStatusCode()) { + actionListener.onResponse(Optional.empty()); + } else { + actionListener.onFailure(parseResponseException(responseException)); + } + } else { + actionListener.onFailure(exception); + } + } + }; + } /** * Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}. diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java index fa36c02d02c..c6b7f1d4234 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java @@ -22,6 +22,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.rollup.DeleteRollupJobRequest; import org.elasticsearch.client.rollup.DeleteRollupJobResponse; +import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest; +import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse; import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupCapsRequest; @@ -219,4 +221,40 @@ public class RollupClient { listener, Collections.emptySet()); } + + /** + * Get the Rollup Index Capabilities of a rollup index or pattern + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public GetRollupIndexCapsResponse getRollupIndexCapabilities(GetRollupIndexCapsRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + RollupRequestConverters::getRollupIndexCaps, + options, + GetRollupIndexCapsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Asynchronously Get the Rollup Index Capabilities of a rollup index or pattern + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getRollupIndexCapabilitiesAsync(GetRollupIndexCapsRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + RollupRequestConverters::getRollupIndexCaps, + options, + GetRollupIndexCapsResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java index f662d592d1a..f0fe801ae59 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java @@ -24,6 +24,7 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.rollup.DeleteRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupCapsRequest; +import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest; import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.PutRollupJobRequest; import org.elasticsearch.client.rollup.StartRollupJobRequest; @@ -85,4 +86,14 @@ final class RollupRequestConverters { request.setEntity(createEntity(getRollupCapsRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } + + static Request getRollupIndexCaps(final GetRollupIndexCapsRequest getRollupIndexCapsRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(getRollupIndexCapsRequest.indices()) + .addPathPartAsIs("_xpack", "rollup", "data") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getRollupIndexCapsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java index 3b957b2defb..4bf7565222a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java @@ -24,8 +24,11 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksReque import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import 
org.elasticsearch.client.tasks.GetTaskRequest; +import org.elasticsearch.client.tasks.GetTaskResponse; import java.io.IOException; +import java.util.Optional; import static java.util.Collections.emptySet; @@ -67,6 +70,34 @@ public final class TasksClient { restHighLevelClient.performRequestAsyncAndParseEntity(request, TasksRequestConverters::listTasks, options, ListTasksResponse::fromXContent, listener, emptySet()); } + + /** + * Get a task using the Task Management API. + * See + * Task Management API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public Optional get(GetTaskRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseOptionalEntity(request, TasksRequestConverters::getTask, options, + GetTaskResponse::fromXContent); + } + + /** + * Asynchronously get a task using the Task Management API. + * See + * Task Management API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener an {@link ActionListener} that takes an optional response (404s are returned as an empty Optional) + */ + public void getAsync(GetTaskRequest request, RequestOptions options, ActionListener> listener) { + + restHighLevelClient.performRequestAsyncAndParseOptionalEntity(request, TasksRequestConverters::getTask, options, + GetTaskResponse::fromXContent, listener); + } /** * Cancel one or more cluster tasks using the Task Management API. diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java index 45723dcc938..f0e9cf4e025 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksRequestConverters.java @@ -23,6 +23,8 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.client.RequestConverters.EndpointBuilder; +import org.elasticsearch.client.tasks.GetTaskRequest; final class TasksRequestConverters { @@ -54,4 +56,16 @@ final class TasksRequestConverters { .putParam("group_by", "none"); return request; } + + static Request getTask(GetTaskRequest getTaskRequest) { + String endpoint = new EndpointBuilder().addPathPartAsIs("_tasks") + .addPathPartAsIs(getTaskRequest.getNodeId() + ":" + Long.toString(getTaskRequest.getTaskId())) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withTimeout(getTaskRequest.getTimeout()) + .withWaitForCompletion(getTaskRequest.getWaitForCompletion()); + return request; + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupCapsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupCapsResponse.java index 872dc7440a0..f4c516d015d 100644 --- 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupCapsResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupCapsResponse.java @@ -18,10 +18,6 @@ */ package org.elasticsearch.client.rollup; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -30,7 +26,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; -public class GetRollupCapsResponse implements ToXContentObject { +public class GetRollupCapsResponse { private final Map jobs; @@ -42,16 +38,6 @@ public class GetRollupCapsResponse implements ToXContentObject { return jobs; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - for (Map.Entry entry : jobs.entrySet()) { - entry.getValue().toXContent(builder, params); - } - builder.endObject(); - return builder; - } - public static GetRollupCapsResponse fromXContent(final XContentParser parser) throws IOException { Map jobs = new HashMap<>(); XContentParser.Token token = parser.nextToken(); @@ -84,9 +70,4 @@ public class GetRollupCapsResponse implements ToXContentObject { GetRollupCapsResponse other = (GetRollupCapsResponse) obj; return Objects.equals(jobs, other.jobs); } - - @Override - public final String toString() { - return Strings.toString(this); - } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupIndexCapsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupIndexCapsRequest.java new file mode 100644 index 00000000000..6bb0c9b48d6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupIndexCapsRequest.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; + +public class GetRollupIndexCapsRequest implements Validatable, ToXContentObject { + private static final String INDICES = "indices"; + private static final String INDICES_OPTIONS = "indices_options"; + + private String[] indices; + private IndicesOptions options; + + public GetRollupIndexCapsRequest(final String... 
indices) { + this(indices, IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED); + } + + public GetRollupIndexCapsRequest(final String[] indices, final IndicesOptions options) { + if (indices == null || indices.length == 0) { + throw new IllegalArgumentException("[indices] must not be null or empty"); + } + for (String index : indices) { + if (Strings.isNullOrEmpty(index)) { + throw new IllegalArgumentException("[index] must not be null or empty"); + } + } + this.indices = indices; + this.options = Objects.requireNonNull(options); + } + + public IndicesOptions indicesOptions() { + return options; + } + + public String[] indices() { + return indices; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.array(INDICES, indices); + builder.startObject(INDICES_OPTIONS); + { + options.toXContent(builder, params); + } + builder.endObject(); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices), options); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetRollupIndexCapsRequest other = (GetRollupIndexCapsRequest) obj; + return Arrays.equals(indices, other.indices) + && Objects.equals(options, other.options); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupIndexCapsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupIndexCapsResponse.java new file mode 100644 index 00000000000..831e10f1747 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/GetRollupIndexCapsResponse.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +public class GetRollupIndexCapsResponse { + + private final Map jobs; + + public GetRollupIndexCapsResponse(final Map jobs) { + this.jobs = Collections.unmodifiableMap(Objects.requireNonNull(jobs)); + } + + public Map getJobs() { + return jobs; + } + + public static GetRollupIndexCapsResponse fromXContent(final XContentParser parser) throws IOException { + Map jobs = new HashMap<>(); + XContentParser.Token token = parser.nextToken(); + if (token.equals(XContentParser.Token.START_OBJECT)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token.equals(XContentParser.Token.FIELD_NAME)) { + String pattern = parser.currentName(); + + RollableIndexCaps cap = RollableIndexCaps.PARSER.apply(pattern).apply(parser, null); + jobs.put(pattern, cap); + } + } + } + return new GetRollupIndexCapsResponse(jobs); + } + + @Override + public int hashCode() { + return Objects.hash(jobs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetRollupIndexCapsResponse other = (GetRollupIndexCapsResponse) obj; + return Objects.equals(jobs, other.jobs); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/GetTaskRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/GetTaskRequest.java new file mode 100644 index 00000000000..0dc31689375 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/GetTaskRequest.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.tasks; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Objects; +import java.util.Optional; + +public class GetTaskRequest implements Validatable { + private final String nodeId; + private final long taskId; + private boolean waitForCompletion = false; + private TimeValue timeout = null; + + public GetTaskRequest(String nodeId, long taskId) { + this.nodeId = nodeId; + this.taskId = taskId; + } + + public String getNodeId() { + return nodeId; + } + + public long getTaskId() { + return taskId; + } + + /** + * Should this request wait for the task to complete? + */ + public boolean getWaitForCompletion() { + return waitForCompletion; + } + + /** + * Should this request wait for the task to complete? 
+ */ + public GetTaskRequest setWaitForCompletion(boolean waitForCompletion) { + this.waitForCompletion = waitForCompletion; + return this; + } + + /** + * Timeout to wait for any async actions this request must take. + */ + public TimeValue getTimeout() { + return timeout; + } + + /** + * Timeout to wait for any async actions this request must take. + */ + public GetTaskRequest setTimeout(TimeValue timeout) { + this.timeout = timeout; + return this; + } + + @Override + public Optional validate() { + final ValidationException validationException = new ValidationException(); + if (timeout != null && !waitForCompletion) { + validationException.addValidationError("Timeout settings are only accepted if waitForCompletion is also set"); + } + if (validationException.validationErrors().isEmpty()) { + return Optional.empty(); + } + return Optional.of(validationException); + } + + @Override + public int hashCode() { + return Objects.hash(nodeId, taskId, waitForCompletion, timeout); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GetTaskRequest other = (GetTaskRequest) obj; + return Objects.equals(nodeId, other.nodeId) && + taskId == other.taskId && + waitForCompletion == other.waitForCompletion && + Objects.equals(timeout, other.timeout); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/GetTaskResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/GetTaskResponse.java new file mode 100644 index 00000000000..05d40f12f5b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/tasks/GetTaskResponse.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.tasks; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.tasks.TaskInfo; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class GetTaskResponse { + private final boolean completed; + private final TaskInfo taskInfo; + public static final ParseField COMPLETED = new ParseField("completed"); + public static final ParseField TASK = new ParseField("task"); + + public GetTaskResponse(boolean completed, TaskInfo taskInfo) { + this.completed = completed; + this.taskInfo = taskInfo; + } + + public boolean isCompleted() { + return completed; + } + + public TaskInfo getTaskInfo() { + return taskInfo; + } + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("get_task", + true, a -> new GetTaskResponse((boolean) a[0], (TaskInfo) a[1])); + static { + PARSER.declareBoolean(constructorArg(), COMPLETED); + PARSER.declareObject(constructorArg(), (p, c) -> TaskInfo.fromXContent(p), TASK); + } + + public static GetTaskResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java index ff27fe21c27..3b69f10344a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CustomRestHighLevelClientTests.java @@ -126,8 +126,11 @@ public class CustomRestHighLevelClientTests extends ESTestCase { "parseResponseException", "performRequest", "performRequestAndParseEntity", + "performRequestAndParseOptionalEntity", "performRequestAsync", - "performRequestAsyncAndParseEntity"}; + "performRequestAsyncAndParseEntity", + "performRequestAsyncAndParseOptionalEntity" + }; final Set protectedMethods = Arrays.stream(RestHighLevelClient.class.getDeclaredMethods()) .filter(method -> Modifier.isProtected(method.getModifiers())) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 2c5d279592f..62e6afe6ffb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -97,6 +97,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -675,8 +676,7 @@ public class RestHighLevelClientTests extends ESTestCase { "indices.put_alias", "mtermvectors", "render_search_template", - "scripts_painless_execute", - "tasks.get" + "scripts_painless_execute" }; //These API are not required for high-level client feature completeness String[] notRequiredApi = new String[] { @@ -777,8 +777,11 @@ public class RestHighLevelClientTests extends ESTestCase { assertThat("the return type for method [" + method + "] is incorrect", method.getReturnType().getSimpleName(), equalTo("boolean")); } else { - assertThat("the return type for method [" + 
method + "] is incorrect", - method.getReturnType().getSimpleName(), endsWith("Response")); + // It's acceptable for 404s to be represented as empty Optionals + if (!method.getReturnType().isAssignableFrom(Optional.class)) { + assertThat("the return type for method [" + method + "] is incorrect", + method.getReturnType().getSimpleName(), endsWith("Response")); + } } assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java index ffe75bfc1b1..213ca8710a4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -33,6 +33,8 @@ import org.elasticsearch.client.rollup.DeleteRollupJobRequest; import org.elasticsearch.client.rollup.DeleteRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupCapsRequest; import org.elasticsearch.client.rollup.GetRollupCapsResponse; +import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest; +import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse; import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupJobResponse.IndexerState; @@ -348,4 +350,116 @@ public class RollupIT extends ESRestHighLevelClientTestCase { List> valueCaps = fieldCaps.get("value").getAggs(); assertThat(valueCaps.size(), equalTo(SUPPORTED_METRICS.size())); } + + public void testGetRollupIndexCaps() throws Exception { + final Set values = new HashSet<>(); + double sum = 0.0d; + int max = Integer.MIN_VALUE; + int min = Integer.MAX_VALUE; + + final BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int minute = 0; minute < 60; minute++) { + for (int second = 0; second < 60; second = second + 10) { + final int value = randomIntBetween(0, 100); + + final IndexRequest indexRequest = new IndexRequest("docs", "doc"); + indexRequest.source(jsonBuilder() + .startObject() + .field("value", value) + .field("date", String.format(Locale.ROOT, "2018-01-01T00:%02d:%02dZ", minute, second)) + .endObject()); + bulkRequest.add(indexRequest); + + values.add(value); + sum += value; + if (value > max) { + max = value; + } + if (value < min) { + min = value; + } + } + } + + final int numDocs = bulkRequest.numberOfActions(); + + BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + assertEquals(RestStatus.OK, bulkResponse.status()); + if (bulkResponse.hasFailures()) { + for (BulkItemResponse itemResponse : bulkResponse.getItems()) { + if (itemResponse.isFailed()) { + logger.fatal(itemResponse.getFailureMessage()); + } + } + } + assertFalse(bulkResponse.hasFailures()); + + RefreshResponse refreshResponse = highLevelClient().indices().refresh(new RefreshRequest("docs"), RequestOptions.DEFAULT); + assertEquals(0, refreshResponse.getFailedShards()); + + final String id = randomAlphaOfLength(10); + final String indexPattern = randomFrom("docs", "d*", "doc*"); + final String rollupIndex = randomFrom("rollup", "test"); + final String cron = "*/1 * * * * ?"; + final int pageSize = randomIntBetween(numDocs, numDocs * 10); + // TODO expand this to also test with histogram and terms? 
+ final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); + final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); + final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600)); + + PutRollupJobRequest putRollupJobRequest = + new PutRollupJobRequest(new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout)); + + final RollupClient rollupClient = highLevelClient().rollup(); + PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); + assertTrue(response.isAcknowledged()); + + // wait for the PutJob api to create the index w/ metadata + highLevelClient().cluster().health(new ClusterHealthRequest(rollupIndex).waitForYellowStatus(), RequestOptions.DEFAULT); + + GetRollupIndexCapsRequest getRollupIndexCapsRequest = new GetRollupIndexCapsRequest(rollupIndex); + GetRollupIndexCapsResponse capsResponse = highLevelClient().rollup() + .getRollupIndexCapabilities(getRollupIndexCapsRequest, RequestOptions.DEFAULT); + + assertNotNull(capsResponse); + Map rolledPatterns = capsResponse.getJobs(); + assertThat(rolledPatterns.size(), equalTo(1)); + + RollableIndexCaps docsPattern = rolledPatterns.get(rollupIndex); + assertThat(docsPattern.getIndexName(), equalTo(rollupIndex)); + + List rollupJobs = docsPattern.getJobCaps(); + assertThat(rollupJobs.size(), equalTo(1)); + + RollupJobCaps jobCaps = rollupJobs.get(0); + assertThat(jobCaps.getJobID(), equalTo(id)); + assertThat(jobCaps.getRollupIndex(), equalTo(rollupIndex)); + assertThat(jobCaps.getIndexPattern(), equalTo(indexPattern)); + + Map fieldCaps = jobCaps.getFieldCaps(); + + List> timestampCaps = fieldCaps.get("date").getAggs(); + for (Map.Entry entry : timestampCaps.get(0).entrySet()) { + switch (entry.getKey()) { + case "agg": + assertThat(entry.getValue(), equalTo("date_histogram")); + break; + case "delay": + assertThat(entry.getValue(), equalTo("foo")); + break; + case "interval": + assertThat(entry.getValue(), equalTo("1d")); + break; + case "time_zone": + assertThat(entry.getValue(), equalTo("UTC")); + break; + default: + fail("Unknown field cap: [" + entry.getKey() + "]"); + } + } + + List> valueCaps = fieldCaps.get("value").getAggs(); + assertThat(valueCaps.size(), equalTo(SUPPORTED_METRICS.size())); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java index baa97cfa5b4..52e6d7d3bd7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java @@ -24,10 +24,21 @@ import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRespo import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; +import org.elasticsearch.client.tasks.GetTaskRequest; +import org.elasticsearch.client.tasks.GetTaskResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestStatus; import 
org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Optional; import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.equalTo; @@ -60,6 +71,56 @@ public class TasksIT extends ESRestHighLevelClientTestCase { } assertTrue("List tasks were not found", listTasksFound); } + + public void testGetValidTask() throws IOException { + + // Run a Reindex to create a task + + final String sourceIndex = "source1"; + final String destinationIndex = "dest"; + Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build(); + createIndex(sourceIndex, settings); + createIndex(destinationIndex, settings); + BulkRequest bulkRequest = new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE); + assertEquals(RestStatus.OK, highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status()); + + // (need to use low level client because currently high level client + // doesn't support async return of task id - needs + // https://github.com/elastic/elasticsearch/pull/35202 ) + RestClient lowClient = highLevelClient().getLowLevelClient(); + Request request = new Request("POST", "_reindex"); + request.addParameter("wait_for_completion", "false"); + request.setJsonEntity("{" + " \"source\": {\n" + " \"index\": \"source1\"\n" + " },\n" + " \"dest\": {\n" + + " \"index\": \"dest\"\n" + " }" + "}"); + Response response = lowClient.performRequest(request); + Map map = entityAsMap(response); + Object taskId = map.get("task"); + assertNotNull(taskId); + + TaskId childTaskId = new TaskId(taskId.toString()); + GetTaskRequest gtr = new GetTaskRequest(childTaskId.getNodeId(), childTaskId.getId()); + gtr.setWaitForCompletion(randomBoolean()); + Optional getTaskResponse = execute(gtr, highLevelClient().tasks()::get, highLevelClient().tasks()::getAsync); + assertTrue(getTaskResponse.isPresent()); + GetTaskResponse taskResponse = getTaskResponse.get(); + if (gtr.getWaitForCompletion()) { + assertTrue(taskResponse.isCompleted()); + } + TaskInfo info = taskResponse.getTaskInfo(); + assertTrue(info.isCancellable()); + assertEquals("reindex from [source1] to [dest]", info.getDescription()); + assertEquals("indices:data/write/reindex", info.getAction()); + } + + public void testGetInvalidTask() throws IOException { + // Check 404s are returned as empty Optionals + GetTaskRequest gtr = new GetTaskRequest("doesNotExistNodeName", 123); + Optional getTaskResponse = execute(gtr, highLevelClient().tasks()::get, highLevelClient().tasks()::getAsync); + assertFalse(getTaskResponse.isPresent()); + } public void testCancelTasks() throws IOException { ListTasksRequest listRequest = new ListTasksRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/tasks/GetTaskResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/tasks/GetTaskResponseTests.java new file mode 100644 index 00000000000..60a52a7ff3d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/tasks/GetTaskResponseTests.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core.tasks; + +import org.elasticsearch.client.Requests; +import org.elasticsearch.client.tasks.GetTaskResponse; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.tasks.RawTaskStatus; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class GetTaskResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createTestInstance, + this::toXContent, + GetTaskResponse::fromXContent) + .supportsUnknownFields(true) + .assertEqualsConsumer(this::assertEqualInstances) + .assertToXContentEquivalence(true) + .randomFieldsExcludeFilter(field ->field.endsWith("headers") || field.endsWith("status")) + .test(); + } + + private GetTaskResponse createTestInstance() { + return new GetTaskResponse(randomBoolean(), randomTaskInfo()); + } + + private void toXContent(GetTaskResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + { + builder.field(GetTaskResponse.COMPLETED.getPreferredName(), response.isCompleted()); + builder.startObject(GetTaskResponse.TASK.getPreferredName()); + response.getTaskInfo().toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + } + builder.endObject(); + } + + private void assertEqualInstances(GetTaskResponse expectedInstance, GetTaskResponse newInstance) { + assertEquals(expectedInstance.isCompleted(), newInstance.isCompleted()); + assertEquals(expectedInstance.getTaskInfo(), newInstance.getTaskInfo()); + } + + static TaskInfo randomTaskInfo() { + TaskId taskId = randomTaskId(); + String type = randomAlphaOfLength(5); + String action = randomAlphaOfLength(5); + Task.Status status = randomBoolean() ? randomRawTaskStatus() : null; + String description = randomBoolean() ? randomAlphaOfLength(5) : null; + long startTime = randomLong(); + long runningTimeNanos = randomLong(); + boolean cancellable = randomBoolean(); + TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId(); + Map headers = randomBoolean() ? 
+ Collections.emptyMap() : + Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5)); + return new TaskInfo(taskId, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId, headers); + } + + private static TaskId randomTaskId() { + return new TaskId(randomAlphaOfLength(5), randomLong()); + } + + private static RawTaskStatus randomRawTaskStatus() { + try (XContentBuilder builder = XContentBuilder.builder(Requests.INDEX_CONTENT_TYPE.xContent())) { + builder.startObject(); + int fields = between(0, 10); + for (int f = 0; f < fields; f++) { + builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + builder.endObject(); + return new RawTaskStatus(BytesReference.bytes(builder)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java index bf0edcfe91c..b8169a5a9f3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java @@ -38,6 +38,8 @@ import org.elasticsearch.client.rollup.DeleteRollupJobRequest; import org.elasticsearch.client.rollup.DeleteRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupCapsRequest; import org.elasticsearch.client.rollup.GetRollupCapsResponse; +import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest; +import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse; import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper; @@ -406,6 +408,120 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + @SuppressWarnings("unused") + public void testGetRollupIndexCaps() throws Exception { + RestHighLevelClient client = highLevelClient(); + + DateHistogramGroupConfig dateHistogram = + new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1> + TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter"); + HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out"); + GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms); + List metrics = new ArrayList<>(); // <1> + metrics.add(new MetricConfig("temperature", Arrays.asList("min", "max", "sum"))); + metrics.add(new MetricConfig("voltage", Arrays.asList("avg", "value_count"))); + + //tag::x-pack-rollup-get-rollup-index-caps-setup + final String indexPattern = "docs"; + final String rollupIndexName = "rollup"; + final String cron = "*/1 * * * * ?"; + final int pageSize = 100; + final TimeValue timeout = null; + + String id = "job_1"; + RollupJobConfig config = new RollupJobConfig(id, indexPattern, rollupIndexName, cron, + pageSize, groups, metrics, timeout); + + PutRollupJobRequest request = new PutRollupJobRequest(config); + PutRollupJobResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT); + + boolean acknowledged = response.isAcknowledged(); + //end::x-pack-rollup-get-rollup-index-caps-setup + assertTrue(acknowledged); + + ClusterHealthRequest healthRequest = new 
ClusterHealthRequest(config.getRollupIndex()).waitForYellowStatus(); + ClusterHealthResponse healthResponse = client.cluster().health(healthRequest, RequestOptions.DEFAULT); + assertFalse(healthResponse.isTimedOut()); + assertThat(healthResponse.getStatus(), isOneOf(ClusterHealthStatus.YELLOW, ClusterHealthStatus.GREEN)); + + // Now that the job is created, we should have a rollup index with metadata. + // We can test out the caps API now. + + //tag::x-pack-rollup-get-rollup-index-caps-request + GetRollupIndexCapsRequest getRollupIndexCapsRequest = new GetRollupIndexCapsRequest("rollup"); + //end::x-pack-rollup-get-rollup-index-caps-request + + //tag::x-pack-rollup-get-rollup-index-caps-execute + GetRollupIndexCapsResponse capsResponse = client.rollup() + .getRollupIndexCapabilities(getRollupIndexCapsRequest, RequestOptions.DEFAULT); + //end::x-pack-rollup-get-rollup-index-caps-execute + + //tag::x-pack-rollup-get-rollup-index-caps-response + Map rolledPatterns = capsResponse.getJobs(); + + RollableIndexCaps docsPattern = rolledPatterns.get("rollup"); + + // indexName will be "rollup", the target index we requested + String indexName = docsPattern.getIndexName(); + + // Each index pattern can have multiple jobs that rolled it up, so `getJobCaps()` + // returns a list of jobs that rolled up the pattern + List rollupJobs = docsPattern.getJobCaps(); + RollupJobCaps jobCaps = rollupJobs.get(0); + + // jobID is the identifier we used when we created the job (e.g. `job_1`) + String jobID = jobCaps.getJobID(); + + // rollupIndex is the location that the job stored its rollup docs (e.g. `rollup`) + String rollupIndex = jobCaps.getRollupIndex(); + + // Finally, fieldCaps are the capabilities of individual fields in the config + // The key is the field name, and the value is a RollupFieldCaps object which + // provides more info. + Map fieldCaps = jobCaps.getFieldCaps(); + + // If we retrieve the "timestamp" field, it returns a list of maps. 
Each list + // item represents a different aggregation that can be run against the "timestamp" + // field, and any additional details specific to that agg (interval, etc) + List> timestampCaps = fieldCaps.get("timestamp").getAggs(); + assert timestampCaps.get(0).toString().equals("{agg=date_histogram, delay=7d, interval=1h, time_zone=UTC}"); + + // In contrast to the timestamp field, the temperature field has multiple aggs configured + List> temperatureCaps = fieldCaps.get("temperature").getAggs(); + assert temperatureCaps.toString().equals("[{agg=min}, {agg=max}, {agg=sum}]"); + //end::x-pack-rollup-get-rollup-index-caps-response + + assertThat(indexName, equalTo("rollup")); + assertThat(jobID, equalTo("job_1")); + assertThat(rollupIndex, equalTo("rollup")); + assertThat(fieldCaps.size(), equalTo(8)); + + // tag::x-pack-rollup-get-rollup-index-caps-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetRollupIndexCapsResponse response) { + + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-rollup-get-rollup-index-caps-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-rollup-get-rollup-index-caps-execute-async + client.rollup().getRollupIndexCapabilitiesAsync(getRollupIndexCapsRequest, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-rollup-get-rollup-index-caps-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + @SuppressWarnings("unused") public void testDeleteRollupJob() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupCapsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupCapsResponseTests.java index af595b210e9..a728b65cf64 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupCapsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupCapsResponseTests.java @@ -18,52 +18,13 @@ */ package org.elasticsearch.client.rollup; -import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig; -import org.elasticsearch.client.rollup.job.config.GroupConfig; -import org.elasticsearch.client.rollup.job.config.HistogramGroupConfig; -import org.elasticsearch.client.rollup.job.config.MetricConfig; -import org.elasticsearch.client.rollup.job.config.RollupJobConfig; -import org.elasticsearch.client.rollup.job.config.RollupJobConfigTests; -import org.elasticsearch.client.rollup.job.config.TermsGroupConfig; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; -import org.elasticsearch.test.AbstractXContentTestCase; -import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.stream.Collectors; -import static java.util.Collections.singletonMap; - -public class 
GetRollupCapsResponseTests extends AbstractXContentTestCase { - - private Map indices; - - @Before - private void setupIndices() throws IOException { - int numIndices = randomIntBetween(1,5); - indices = new HashMap<>(numIndices); - for (int i = 0; i < numIndices; i++) { - String indexName = "index_" + randomAlphaOfLength(10); - int numJobs = randomIntBetween(1,5); - List jobs = new ArrayList<>(numJobs); - for (int j = 0; j < numJobs; j++) { - RollupJobConfig config = RollupJobConfigTests.randomRollupJobConfig(randomAlphaOfLength(10)); - jobs.add(new RollupJobCaps(config.getId(), config.getIndexPattern(), - config.getRollupIndex(), createRollupFieldCaps(config))); - } - RollableIndexCaps cap = new RollableIndexCaps(indexName, jobs); - indices.put(indexName, cap); - } - } +public class GetRollupCapsResponseTests extends RollupCapsResponseTestCase { @Override protected GetRollupCapsResponse createTestInstance() { @@ -71,82 +32,16 @@ public class GetRollupCapsResponseTests extends AbstractXContentTestCase entry : response.getJobs().entrySet()) { + entry.getValue().toXContent(builder, null); + } + builder.endObject(); } @Override - protected GetRollupCapsResponse doParseInstance(final XContentParser parser) throws IOException { + protected GetRollupCapsResponse fromXContent(XContentParser parser) throws IOException { return GetRollupCapsResponse.fromXContent(parser); } - - /** - * Lifted from core's RollupJobCaps, so that we can test without having to include this actual logic in the request - */ - private static Map createRollupFieldCaps(final RollupJobConfig rollupJobConfig) { - final Map>> tempFieldCaps = new HashMap<>(); - - final GroupConfig groupConfig = rollupJobConfig.getGroupConfig(); - if (groupConfig != null) { - // Create RollupFieldCaps for the date histogram - final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); - final Map dateHistogramAggCap = new HashMap<>(); - dateHistogramAggCap.put("agg", DateHistogramAggregationBuilder.NAME); - dateHistogramAggCap.put("interval", dateHistogram.getInterval().toString()); - if (dateHistogram.getDelay() != null) { - dateHistogramAggCap.put("delay", dateHistogram.getDelay().toString()); - } - dateHistogramAggCap.put("time_zone", dateHistogram.getTimeZone()); - - List> dateAggCaps = tempFieldCaps.getOrDefault(dateHistogram.getField(), new ArrayList<>()); - dateAggCaps.add(dateHistogramAggCap); - tempFieldCaps.put(dateHistogram.getField(), dateAggCaps); - - // Create RollupFieldCaps for the histogram - final HistogramGroupConfig histogram = groupConfig.getHistogram(); - if (histogram != null) { - final Map histogramAggCap = new HashMap<>(); - histogramAggCap.put("agg", HistogramAggregationBuilder.NAME); - histogramAggCap.put("interval", histogram.getInterval()); - Arrays.stream(rollupJobConfig.getGroupConfig().getHistogram().getFields()).forEach(field -> { - List> caps = tempFieldCaps.getOrDefault(field, new ArrayList<>()); - caps.add(histogramAggCap); - tempFieldCaps.put(field, caps); - }); - } - - // Create RollupFieldCaps for the term - final TermsGroupConfig terms = groupConfig.getTerms(); - if (terms != null) { - final Map termsAggCap = singletonMap("agg", TermsAggregationBuilder.NAME); - Arrays.stream(rollupJobConfig.getGroupConfig().getTerms().getFields()).forEach(field -> { - List> caps = tempFieldCaps.getOrDefault(field, new ArrayList<>()); - caps.add(termsAggCap); - tempFieldCaps.put(field, caps); - }); - } - } - - // Create RollupFieldCaps for the metrics - final List metricsConfig = 
rollupJobConfig.getMetricsConfig(); - if (metricsConfig.size() > 0) { - rollupJobConfig.getMetricsConfig().forEach(metricConfig -> { - final List> metrics = metricConfig.getMetrics().stream() - .map(metric -> singletonMap("agg", (Object) metric)) - .collect(Collectors.toList()); - metrics.forEach(m -> { - List> caps = tempFieldCaps - .getOrDefault(metricConfig.getField(), new ArrayList<>()); - caps.add(m); - tempFieldCaps.put(metricConfig.getField(), caps); - }); - }); - } - - return Collections.unmodifiableMap(tempFieldCaps.entrySet() - .stream() - .collect(Collectors.toMap(Map.Entry::getKey, - e -> new RollupJobCaps.RollupFieldCaps(e.getValue())))); - } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupIndexCapsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupIndexCapsRequestTests.java new file mode 100644 index 00000000000..f53b18a977d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupIndexCapsRequestTests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class GetRollupIndexCapsRequestTests extends ESTestCase { + + public void testNullOrEmptyIndices() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new GetRollupIndexCapsRequest((String[]) null)); + assertThat(e.getMessage(), equalTo("[indices] must not be null or empty")); + + String[] indices = new String[]{}; + e = expectThrows(IllegalArgumentException.class, () -> new GetRollupIndexCapsRequest(indices)); + assertThat(e.getMessage(), equalTo("[indices] must not be null or empty")); + + e = expectThrows(IllegalArgumentException.class, () -> new GetRollupIndexCapsRequest(new String[]{"foo", null})); + assertThat(e.getMessage(), equalTo("[index] must not be null or empty")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupIndexCapsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupIndexCapsResponseTests.java new file mode 100644 index 00000000000..afd0e54f92b --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/GetRollupIndexCapsResponseTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; + +public class GetRollupIndexCapsResponseTests extends RollupCapsResponseTestCase { + + @Override + protected GetRollupIndexCapsResponse createTestInstance() { + return new GetRollupIndexCapsResponse(indices); + } + + @Override + protected void toXContent(GetRollupIndexCapsResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + for (Map.Entry entry : response.getJobs().entrySet()) { + entry.getValue().toXContent(builder, null); + } + builder.endObject(); + } + + @Override + protected GetRollupIndexCapsResponse fromXContent(XContentParser parser) throws IOException { + return GetRollupIndexCapsResponse.fromXContent(parser); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/RollupCapsResponseTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/RollupCapsResponseTestCase.java new file mode 100644 index 00000000000..6d1c0359d17 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/RollupCapsResponseTestCase.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig; +import org.elasticsearch.client.rollup.job.config.GroupConfig; +import org.elasticsearch.client.rollup.job.config.HistogramGroupConfig; +import org.elasticsearch.client.rollup.job.config.MetricConfig; +import org.elasticsearch.client.rollup.job.config.RollupJobConfig; +import org.elasticsearch.client.rollup.job.config.RollupJobConfigTests; +import org.elasticsearch.client.rollup.job.config.TermsGroupConfig; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +abstract class RollupCapsResponseTestCase extends ESTestCase { + + protected Map indices; + + protected abstract T createTestInstance(); + + protected abstract void toXContent(T response, XContentBuilder builder) throws IOException; + + protected abstract T fromXContent(XContentParser parser) throws IOException; + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createTestInstance, + this::toXContent, + this::fromXContent) + .supportsUnknownFields(false) + .randomFieldsExcludeFilter(field -> + field.endsWith("job_id")) + .test(); + } + + @Before + private void setupIndices() throws IOException { + int numIndices = randomIntBetween(1,5); + indices = new HashMap<>(numIndices); + for (int i = 0; i < numIndices; i++) { + String indexName = "index_" + randomAlphaOfLength(10); + int numJobs = randomIntBetween(1,5); + List jobs = new ArrayList<>(numJobs); + for (int j = 0; j < numJobs; j++) { + RollupJobConfig config = RollupJobConfigTests.randomRollupJobConfig(randomAlphaOfLength(10)); + jobs.add(new RollupJobCaps(config.getId(), config.getIndexPattern(), + config.getRollupIndex(), createRollupFieldCaps(config))); + } + RollableIndexCaps cap = new RollableIndexCaps(indexName, jobs); + indices.put(indexName, cap); + } + } + + /** + * Lifted from core's RollupJobCaps, so that we can test without having to include this actual logic in the request + */ + private static Map createRollupFieldCaps(final RollupJobConfig rollupJobConfig) { + final Map>> tempFieldCaps = new HashMap<>(); + + final GroupConfig groupConfig = rollupJobConfig.getGroupConfig(); + if (groupConfig != null) { + // Create RollupFieldCaps for the date histogram + final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); + final Map dateHistogramAggCap = new HashMap<>(); + dateHistogramAggCap.put("agg", DateHistogramAggregationBuilder.NAME); + dateHistogramAggCap.put("interval", dateHistogram.getInterval().toString()); + if (dateHistogram.getDelay() != null) { + dateHistogramAggCap.put("delay", dateHistogram.getDelay().toString()); + } + dateHistogramAggCap.put("time_zone", dateHistogram.getTimeZone()); + + List> dateAggCaps = 
tempFieldCaps.getOrDefault(dateHistogram.getField(), new ArrayList<>()); + dateAggCaps.add(dateHistogramAggCap); + tempFieldCaps.put(dateHistogram.getField(), dateAggCaps); + + // Create RollupFieldCaps for the histogram + final HistogramGroupConfig histogram = groupConfig.getHistogram(); + if (histogram != null) { + final Map histogramAggCap = new HashMap<>(); + histogramAggCap.put("agg", HistogramAggregationBuilder.NAME); + histogramAggCap.put("interval", histogram.getInterval()); + Arrays.stream(rollupJobConfig.getGroupConfig().getHistogram().getFields()).forEach(field -> { + List> caps = tempFieldCaps.getOrDefault(field, new ArrayList<>()); + caps.add(histogramAggCap); + tempFieldCaps.put(field, caps); + }); + } + + // Create RollupFieldCaps for the term + final TermsGroupConfig terms = groupConfig.getTerms(); + if (terms != null) { + final Map termsAggCap = singletonMap("agg", TermsAggregationBuilder.NAME); + Arrays.stream(rollupJobConfig.getGroupConfig().getTerms().getFields()).forEach(field -> { + List> caps = tempFieldCaps.getOrDefault(field, new ArrayList<>()); + caps.add(termsAggCap); + tempFieldCaps.put(field, caps); + }); + } + } + + // Create RollupFieldCaps for the metrics + final List metricsConfig = rollupJobConfig.getMetricsConfig(); + if (metricsConfig.size() > 0) { + rollupJobConfig.getMetricsConfig().forEach(metricConfig -> { + final List> metrics = metricConfig.getMetrics().stream() + .map(metric -> singletonMap("agg", (Object) metric)) + .collect(Collectors.toList()); + metrics.forEach(m -> { + List> caps = tempFieldCaps + .getOrDefault(metricConfig.getField(), new ArrayList<>()); + caps.add(m); + tempFieldCaps.put(metricConfig.getField(), caps); + }); + }); + } + + return Collections.unmodifiableMap(tempFieldCaps.entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, + e -> new RollupJobCaps.RollupFieldCaps(e.getValue())))); + } +} diff --git a/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc b/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc new file mode 100644 index 00000000000..52cb7ff9524 --- /dev/null +++ b/docs/java-rest/high-level/rollup/get_rollup_index_caps.asciidoc @@ -0,0 +1,84 @@ +-- +:api: rollup-get-rollup-index-caps +:request: GetRollupIndexCapsRequest +:response: GetRollupIndexCapsResponse +-- + +[id="{upid}-x-pack-{api}"] +=== Get Rollup Index Capabilities API + +The Get Rollup Index Capabilities API allows the user to determine if a concrete index or index pattern contains +stored rollup jobs and data. If it contains data stored from rollup jobs, the capabilities of those jobs +are returned. The API accepts a `GetRollupIndexCapsRequest` object as a request and returns a `GetRollupIndexCapsResponse`. + +[id="{upid}-x-pack-{api}-request"] +==== Get Rollup Index Capabilities Request + +A +{request}+ requires a single parameter: the target index or index pattern (e.g. `rollup-foo`): + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[x-pack-{api}-request] +-------------------------------------------------- + +[id="{upid}-x-pack-{api}-execution"] +==== Execution + +The Get Rollup Index Capabilities API can be executed through a `RollupClient` +instance. 
Such an instance can be retrieved from a `RestHighLevelClient`
+using the `rollup()` method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[x-pack-{api}-execute]
+--------------------------------------------------
+
+[id="{upid}-x-pack-{api}-response"]
+==== Response
+
+The returned +{response}+ holds lists and maps of values which correspond to the capabilities
+of the rollup index/index pattern (what jobs are stored in the index, their capabilities, what
+aggregations are available, etc.). Because multiple jobs can be stored in one index, the
+response may include several jobs with different configurations.
+
+The capabilities are essentially the same as the original job configuration, just presented in a different
+manner. For example, if we had created a job with the following config:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[x-pack-{api}-setup]
+--------------------------------------------------
+
+The +{response}+ object would contain the same information, laid out in a slightly different manner:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[x-pack-{api}-response]
+--------------------------------------------------
+
+[id="{upid}-x-pack-{api}-async"]
+==== Asynchronous Execution
+
+This request can be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[x-pack-{api}-execute-async]
+--------------------------------------------------
+<1> The +{request}+ to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for +{response}+ looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[x-pack-{api}-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument.
+<2> Called in case of failure. The raised exception is provided as an argument.
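
For readers who want to see the end-to-end call without the include-tagged indirection, here is a minimal sketch of how the new API is expected to be used. It assumes a node reachable on `localhost:9200` and a rollup index named `rollup-foo` (both placeholders), and that the `RollupClient` method is named `getRollupIndexCaps` after the request and response classes added in this change; treat those names as illustrative rather than definitive.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest;
import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse;

public class RollupIndexCapsExample {
    public static void main(String[] args) throws Exception {
        // Assumption: a node is reachable on localhost:9200.
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            // "rollup-foo" is a placeholder index name or pattern.
            GetRollupIndexCapsRequest request = new GetRollupIndexCapsRequest("rollup-foo");
            // Method name assumed from the API naming used in these docs.
            GetRollupIndexCapsResponse response =
                client.rollup().getRollupIndexCaps(request, RequestOptions.DEFAULT);
            // The response maps each matching rollup index to the jobs stored in it.
            response.getJobs().forEach((index, caps) ->
                System.out.println(index + " -> " + caps));
        }
    }
}
--------------------------------------------------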
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index dd867d4691a..1589a476c29 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -311,12 +311,14 @@ The Java High Level REST Client supports the following Rollup APIs:
* <<{upid}-rollup-delete-job>>
* <<{upid}-rollup-get-job>>
* <<{upid}-x-pack-rollup-get-rollup-caps>>
+* <<{upid}-x-pack-rollup-get-rollup-index-caps>>

include::rollup/put_job.asciidoc[]
include::rollup/start_job.asciidoc[]
include::rollup/delete_job.asciidoc[]
include::rollup/get_job.asciidoc[]
include::rollup/get_rollup_caps.asciidoc[]
+include::rollup/get_rollup_index_caps.asciidoc[]

== Security APIs
diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc
index 531c5d16467..ddbe0b16cc6 100644
--- a/docs/plugins/repository-s3.asciidoc
+++ b/docs/plugins/repository-s3.asciidoc
@@ -208,8 +208,12 @@ The following settings are supported:
`storage_class`::

-    Sets the S3 storage class type for the backup files. Values may be
-    `standard`, `reduced_redundancy`, `standard_ia`. Defaults to `standard`.
+    Sets the S3 storage class for objects stored in the snapshot repository.
+    Values may be `standard`, `reduced_redundancy`, `standard_ia`.
+    Defaults to `standard`. Changing this setting on an existing repository
+    only affects the storage class for newly created objects, resulting in a
+    mixed usage of storage classes. Additionally, S3 Lifecycle Policies can
+    be used to manage the storage class of existing objects.
    Due to the extra complexity with the Glacier class lifecycle, it is not
    currently supported by the plugin. For more information about the different classes, see
    http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html[AWS Storage Classes Guide]
diff --git a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
index 155c3430bab..e484b9d3d1c 100644
--- a/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
+++ b/docs/reference/ccr/apis/follow/get-follow-stats.asciidoc
@@ -190,6 +190,7 @@ GET /follower_index/_ccr/stats
// CONSOLE

The API returns the following results:
+
[source,js]
--------------------------------------------------
{
diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc
index 200ad016b43..4ae864dd2c9 100644
--- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc
+++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc
@@ -12,12 +12,155 @@ Get {ccr} stats.
==== Description

-This API gets {ccr} stats.
+This API gets {ccr} stats. It returns all stats related to {ccr}. In
+particular, it returns stats about auto-following, and the same
+shard-level stats as in the <<ccr-get-follow-stats,get follow stats API>>.
==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /follower_index/_ccr/follow
+{
+  "remote_cluster" : "remote_cluster",
+  "leader_index" : "leader_index"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTSETUP
+// TEST[setup:remote_cluster_and_leader_index]
+
+[source,js]
+--------------------------------------------------
+POST /follower_index/_ccr/pause_follow
+--------------------------------------------------
+// CONSOLE
+// TEARDOWN
+
+//////////////////////////
+
[source,js]
--------------------------------------------------
GET /_ccr/stats
--------------------------------------------------
// CONSOLE
+
+==== Results
+
+This API returns the following information:
+
+`auto_follow_stats`::
+  (object) an object representing stats for the auto-follow coordinator
+
+This object consists of the following fields:
+
+`auto_follow_stats.number_of_failed_follow_indices`::
+  (long) the number of indices that the auto-follow coordinator failed to
+  automatically follow; the causes of recent failures are captured in the logs
+  of the elected master node, and in the
+  `auto_follow_stats.recent_auto_follow_errors` field
+
+`auto_follow_stats.number_of_failed_remote_cluster_state_requests`::
+  (long) the number of times that the auto-follow coordinator failed to retrieve
+  the cluster state from a remote cluster registered in a collection of
+  auto-follow patterns
+
+`auto_follow_stats.number_of_successful_follow_indices`::
+  (long) the number of indices that the auto-follow coordinator successfully
+  followed
+
+`auto_follow_stats.recent_auto_follow_errors`::
+  (array) an array of objects representing failures by the auto-follow
+  coordinator
+
+`follow_stats`::
+  (object) an object representing shard-level stats for follower indices; refer
+  to the details of the response in the
+  <<ccr-get-follow-stats,get follow stats API>>
+
+==== Example
+
+This example retrieves {ccr} stats:
+
+[source,js]
+--------------------------------------------------
+GET /_ccr/stats
+--------------------------------------------------
+// CONSOLE
+
+The API returns the following results:
+
+[source,js]
+--------------------------------------------------
+{
+  "auto_follow_stats" : {
+    "number_of_failed_follow_indices" : 0,
+    "number_of_failed_remote_cluster_state_requests" : 0,
+    "number_of_successful_follow_indices" : 1,
+    "recent_auto_follow_errors" : [ ]
+  },
+  "follow_stats" : {
+    "indices" : [
+      {
+        "index" : "follower_index",
+        "shards" : [
+          {
+            "remote_cluster" : "remote_cluster",
+            "leader_index" : "leader_index",
+            "follower_index" : "follower_index",
+            "shard_id" : 0,
+            "leader_global_checkpoint" : 1024,
+            "leader_max_seq_no" : 1536,
+            "follower_global_checkpoint" : 768,
+            "follower_max_seq_no" : 896,
+            "last_requested_seq_no" : 897,
+            "outstanding_read_requests" : 8,
+            "outstanding_write_requests" : 2,
+            "write_buffer_operation_count" : 64,
+            "follower_mapping_version" : 4,
+            "follower_settings_version" : 2,
+            "total_read_time_millis" : 32768,
+            "total_read_remote_exec_time_millis" : 16384,
+            "successful_read_requests" : 32,
+            "failed_read_requests" : 0,
+            "operations_read" : 896,
+            "bytes_read" : 32768,
+            "total_write_time_millis" : 16384,
+            "write_buffer_size_in_bytes" : 1536,
+            "successful_write_requests" : 16,
+            "failed_write_requests" : 0,
+            "operations_written" : 832,
+            "read_exceptions" : [ ],
+            "time_since_last_read_millis" : 8
+          }
+        ]
+      }
+    ]
+  }
+}
+--------------------------------------------------
+//
TESTRESPONSE[s/"number_of_successful_follow_indices" : 1/"number_of_successful_follow_indices" : $body.auto_follow_stats.number_of_successful_follow_indices/] +// TESTRESPONSE[s/"leader_global_checkpoint" : 1024/"leader_global_checkpoint" : $body.follow_stats.indices.0.shards.0.leader_global_checkpoint/] +// TESTRESPONSE[s/"leader_max_seq_no" : 1536/"leader_max_seq_no" : $body.follow_stats.indices.0.shards.0.leader_max_seq_no/] +// TESTRESPONSE[s/"follower_global_checkpoint" : 768/"follower_global_checkpoint" : $body.follow_stats.indices.0.shards.0.follower_global_checkpoint/] +// TESTRESPONSE[s/"follower_max_seq_no" : 896/"follower_max_seq_no" : $body.follow_stats.indices.0.shards.0.follower_max_seq_no/] +// TESTRESPONSE[s/"last_requested_seq_no" : 897/"last_requested_seq_no" : $body.follow_stats.indices.0.shards.0.last_requested_seq_no/] +// TESTRESPONSE[s/"outstanding_read_requests" : 8/"outstanding_read_requests" : $body.follow_stats.indices.0.shards.0.outstanding_read_requests/] +// TESTRESPONSE[s/"outstanding_write_requests" : 2/"outstanding_write_requests" : $body.follow_stats.indices.0.shards.0.outstanding_write_requests/] +// TESTRESPONSE[s/"write_buffer_operation_count" : 64/"write_buffer_operation_count" : $body.follow_stats.indices.0.shards.0.write_buffer_operation_count/] +// TESTRESPONSE[s/"follower_mapping_version" : 4/"follower_mapping_version" : $body.follow_stats.indices.0.shards.0.follower_mapping_version/] +// TESTRESPONSE[s/"follower_settings_version" : 2/"follower_settings_version" : $body.follow_stats.indices.0.shards.0.follower_settings_version/] +// TESTRESPONSE[s/"total_read_time_millis" : 32768/"total_read_time_millis" : $body.follow_stats.indices.0.shards.0.total_read_time_millis/] +// TESTRESPONSE[s/"total_read_remote_exec_time_millis" : 16384/"total_read_remote_exec_time_millis" : $body.follow_stats.indices.0.shards.0.total_read_remote_exec_time_millis/] +// TESTRESPONSE[s/"successful_read_requests" : 32/"successful_read_requests" : $body.follow_stats.indices.0.shards.0.successful_read_requests/] +// TESTRESPONSE[s/"failed_read_requests" : 0/"failed_read_requests" : $body.follow_stats.indices.0.shards.0.failed_read_requests/] +// TESTRESPONSE[s/"operations_read" : 896/"operations_read" : $body.follow_stats.indices.0.shards.0.operations_read/] +// TESTRESPONSE[s/"bytes_read" : 32768/"bytes_read" : $body.follow_stats.indices.0.shards.0.bytes_read/] +// TESTRESPONSE[s/"total_write_time_millis" : 16384/"total_write_time_millis" : $body.follow_stats.indices.0.shards.0.total_write_time_millis/] +// TESTRESPONSE[s/"write_buffer_size_in_bytes" : 1536/"write_buffer_size_in_bytes" : $body.follow_stats.indices.0.shards.0.write_buffer_size_in_bytes/] +// TESTRESPONSE[s/"successful_write_requests" : 16/"successful_write_requests" : $body.follow_stats.indices.0.shards.0.successful_write_requests/] +// TESTRESPONSE[s/"failed_write_requests" : 0/"failed_write_requests" : $body.follow_stats.indices.0.shards.0.failed_write_requests/] +// TESTRESPONSE[s/"operations_written" : 832/"operations_written" : $body.follow_stats.indices.0.shards.0.operations_written/] +// TESTRESPONSE[s/"time_since_last_read_millis" : 8/"time_since_last_read_millis" : $body.follow_stats.indices.0.shards.0.time_since_last_read_millis/] diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index a0f97a659f2..b3d626d4854 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -1,7 +1,329 @@ [role="xpack"] 
[testenv="platinum"] [[ccr-getting-started]] -== Getting Started +== Getting Started with {ccr} beta[] -This is the getting started section of the {ccr} docs. \ No newline at end of file + +This getting-started guide for {ccr} shows you how to: + +* <> +* <> in a remote cluster +* <> that replicates + a leader index +* <> + +[float] +[[ccr-getting-started-before-you-begin]] +=== Before you begin +. {stack-gs}/get-started-elastic-stack.html#install-elasticsearch[Install {es}] + on your local and remote clusters. + +. Obtain a license that includes the {ccr} features. See + https://www.elastic.co/subscriptions[subscriptions] and + <>. + +. If the Elastic {security-features} are enabled in your local and remote + clusters, you need a user that has appropriate authority to perform the steps + in this tutorial. ++ +-- +[[ccr-getting-started-security]] +The {ccr} features use cluster privileges and built-in roles to make it easier +to control which users have authority to manage {ccr}. + +By default, you can perform all of the steps in this tutorial by +using the built-in `elastic` user. However, a password must be set for this user +before the user can do anything. For information about how to set that password, +see <>. + +If you are performing these steps in a production environment, take extra care +because the `elastic` user has the `superuser` role and you could inadvertently +make significant changes. + +Alternatively, you can assign the appropriate privileges to a user ID of your +choice. On the remote cluster that contains the leader index, a user will need +the `read_ccr` cluster privilege and `monitor` and `read` privileges on the +leader index. + +[source,yml] +-------------------------------------------------- +ccr_user: + cluster: + - read_ccr + indices: + - names: [ 'leader-index' ] + privileges: + - monitor + - read +-------------------------------------------------- + +On the local cluster that contains the follower index, the same user will need +the `manage_ccr` cluster privilege and `monitor`, `read`, `write` and +`manage_follow_index` privileges on the follower index. + +[source,yml] +-------------------------------------------------- +ccr_user: + cluster: + - manage_ccr + indices: + - names: [ 'follower-index' ] + privileges: + - monitor + - read + - write + - manage_follow_index +-------------------------------------------------- + +If you are managing +<> via the +cluster update settings API, you will also need a user with the `all` cluster +privilege. +-- + +[float] +[[ccr-getting-started-remote-cluster]] +=== Connecting to a remote cluster + +The {ccr} features require that you +{ref}/modules-remote-clusters.html[connect your local cluster to a remote +cluster]. In this tutorial, we will connect our local cluster to a remote +cluster with the cluster alias `leader`. + +[source,js] +-------------------------------------------------- +PUT /_cluster/settings +{ + "persistent" : { + "cluster" : { + "remote" : { + "leader" : { + "seeds" : [ + "127.0.0.1:9300" <1> + ] + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:host] +// TEST[s/127.0.0.1:9300/\${transport_host}/] +<1> Specifies the hostname and transport port of a seed node in the remote + cluster. + +You can verify that the local cluster is successfully connected to the remote +cluster. 
+
+[source,js]
+--------------------------------------------------
+GET /_remote/info
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The API will respond by showing that the local cluster is connected to the
+remote cluster.
+
+[source,js]
+--------------------------------------------------
+{
+  "leader" : {
+    "seeds" : [
+      "127.0.0.1:9300"
+    ],
+    "connected" : true, <1>
+    "num_nodes_connected" : 1, <2>
+    "max_connections_per_cluster" : 3,
+    "initial_connect_timeout" : "30s",
+    "skip_unavailable" : false
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
+// TEST[s/127.0.0.1:9300/$body.leader.seeds.0/]
+// TEST[s/"connected" : true/"connected" : $body.leader.connected/]
+// TEST[s/"num_nodes_connected" : 1/"num_nodes_connected" : $body.leader.num_nodes_connected/]
+<1> This shows that the local cluster is connected to the remote cluster with
+    the cluster alias `leader`.
+<2> This shows the number of nodes in the remote cluster that the local cluster
+    is connected to.
+
+[float]
+[[ccr-getting-started-leader-index]]
+=== Creating a leader index
+
+Leader indices require special index settings to ensure that the operations that
+need to be replicated are available when the
+follower requests them from the leader. These settings are used to enable soft
+deletes on the leader index and to control how many soft deletes are retained. A
+_soft delete_ occurs whenever a document is deleted or updated. Soft deletes can
+be enabled only on new indices created on or after {es} 6.5.0.
+
+In the following example, we will create a leader index in the remote cluster:
+
+[source,js]
+--------------------------------------------------
+PUT /server-metrics
+{
+  "settings" : {
+    "index" : {
+      "number_of_shards" : 1,
+      "number_of_replicas" : 0,
+      "soft_deletes" : {
+        "enabled" : true, <1>
+        "retention" : {
+          "operations" : 1024 <2>
+        }
+      }
+    }
+  },
+  "mappings" : {
+    "metric" : {
+      "properties" : {
+        "@timestamp" : {
+          "type" : "date"
+        },
+        "accept" : {
+          "type" : "long"
+        },
+        "deny" : {
+          "type" : "long"
+        },
+        "host" : {
+          "type" : "keyword"
+        },
+        "response" : {
+          "type" : "float"
+        },
+        "service" : {
+          "type" : "keyword"
+        },
+        "total" : {
+          "type" : "long"
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+<1> Enables soft deletes on the leader index.
+<2> Specifies that up to 1024 soft-delete operations are retained.
+
+[float]
+[[ccr-getting-started-follower-index]]
+=== Creating a follower index
+
+Follower indices are created with the {ref}/ccr-put-follow.html[create follower
+API]. When you create a follower index, you must reference the
+<<ccr-getting-started-remote-cluster,remote cluster>> and the
+<<ccr-getting-started-leader-index,leader index>> that you created in the remote
+cluster.
+
+[source,js]
+--------------------------------------------------
+PUT /server-metrics-copy/_ccr/follow
+{
+  "remote_cluster" : "leader",
+  "leader_index" : "server-metrics"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+{
+  "follow_index_created" : true,
+  "follow_index_shards_acked" : true,
+  "index_following_started" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+//////////////////////////
+
+Now when you index documents into your leader index, you will see these
+documents replicated in the follower index. You can
+inspect the status of replication using the
+{ref}/ccr-get-follow-stats.html[get follower stats API].
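
If you want to sanity-check replication by hand, the sketch below does that with the low-level Java REST client: it indexes one document into the leader index and then reads the follower's follow stats. This is a minimal sketch, not part of this change; the hosts, ports, and the sample document are placeholders, while the endpoints are the ones shown in this guide.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ReplicationCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder hosts: the remote (leader) cluster and the local
        // (follower) cluster from this tutorial.
        try (RestClient leader = RestClient.builder(
                 new HttpHost("localhost", 9201, "http")).build();
             RestClient follower = RestClient.builder(
                 new HttpHost("localhost", 9200, "http")).build()) {

            // Index a sample document into the leader index.
            Request index = new Request("PUT", "/server-metrics/metric/1");
            index.setJsonEntity(
                "{\"@timestamp\":\"2018-11-06T00:00:00Z\",\"host\":\"web-1\",\"total\":42}");
            leader.performRequest(index);

            // Ask the follower for its shard-level follow stats.
            Response stats = follower.performRequest(
                new Request("GET", "/server-metrics-copy/_ccr/stats"));
            System.out.println(EntityUtils.toString(stats.getEntity()));
        }
    }
}
--------------------------------------------------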
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST /server-metrics-copy/_ccr/pause_follow
+
+POST /server-metrics-copy/_close
+
+POST /server-metrics-copy/_ccr/unfollow
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+//////////////////////////
+
+[float]
+[[ccr-getting-started-auto-follow]]
+=== Automatically create follower indices
+
+The auto-follow feature in {ccr} is useful for time series use cases where you
+want to follow new indices that are periodically created in the remote cluster
+(such as daily Beats indices). Auto-following is configured using the
+{ref}/ccr-put-auto-follow-pattern.html[create auto-follow pattern API]. With an
+auto-follow pattern, you reference the
+<<ccr-getting-started-remote-cluster,remote cluster>> that you connected your
+local cluster to. You must also specify a collection of patterns that match the
+indices you want to automatically follow.
+
+For example:
+
+[source,js]
+--------------------------------------------------
+PUT /_ccr/auto_follow/beats
+{
+  "remote_cluster" : "leader",
+  "leader_index_patterns" :
+  [
+    "metricbeat-*", <1>
+    "packetbeat-*" <2>
+  ],
+  "follow_index_pattern" : "{{leader_index}}-copy" <3>
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+<1> Automatically follow new {metricbeat} indices.
+<2> Automatically follow new {packetbeat} indices.
+<3> The name of the follower index is derived from the name of the leader index
+    by adding the suffix `-copy`.
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+{
+  "acknowledged" : true
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+//////////////////////////
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+DELETE /_ccr/auto_follow/beats
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+//////////////////////////
diff --git a/docs/reference/ml/apis/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc
index ce053484906..e28bf4f1758 100644
--- a/docs/reference/ml/apis/put-job.asciidoc
+++ b/docs/reference/ml/apis/put-job.asciidoc
@@ -65,8 +65,8 @@ Instantiates a job.
  score are applied, as new data is seen. See <>.

`results_index_name`::
-  (string) The name of the index in which to store the {ml} results. The default
-  value is `shared`, which corresponds to the index name `.ml-anomalies-shared`.
+  (string) A text string that affects the name of the {ml} results index. The
+  default value is `shared`, which generates an index named `.ml-anomalies-shared`.

`results_retention_days`::
  (long) Advanced configuration option.
The number of days for which job results diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index b536c887eab..00000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8db13c6e146c851614c9f862f1eac67431f9b509 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-6d9c714052.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..3c9b97e3e44 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +8f76b85824b273fafa1e25610c3aff66b97b0dd1 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 65e5ca33822..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b474e1a2d7f0172338a08f159849a6c491781d70 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-6d9c714052.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..ed1ae1538be --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +ee5e4e4341fdde3978b01945bbfaac72a200fa04 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 51fb0eebff7..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fc547e69837bcb808f1782bfa35490645bab9cae \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-6d9c714052.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..3840dd32567 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +34dfcdd2e37b62ad01a8bb4fbda66ea6bf513c28 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 3389dc2f73e..00000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e08961a2ec9414947693659ff79bb7e21a410298 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-6d9c714052.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..2cd338ed356 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +25f02c3dfee4efbfe74d87558a6bdd0ea8389e12 \ No newline at 
end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index b0854f65786..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09280919225656c7ce2a14af29666a02bd86c540 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-6d9c714052.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..4dd16813c06 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +1023375e89d6340a93c2409c726a881752eb4ac1 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 00860c9fc83..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -880f10393cdefff7575fbf5b2ced890666ec81dc \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-6d9c714052.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..e82b6c54da0 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +70e598154fb5cb3dced5e82de4afcde2009f1755 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 4818fd1665f..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b41451a9d4e30b8a9a14ccdd7553e5796f77cf44 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-6d9c714052.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..988de653de5 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +e8b4634d426efee1515fc289b4ad67d1c714d14d \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 1b4f444999f..00000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -145fd2c803d682c2cb2d78e6e350e09a09a09ea0 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-6d9c714052.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..67ad39fecd9 --- /dev/null +++ 
b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +9f53e03113ca04c337d678126acf025cfeccff6e \ No newline at end of file diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/monitor/os/EvilOsProbeTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/monitor/os/EvilOsProbeTests.java index 429019b3a13..f4936d624e5 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/monitor/os/EvilOsProbeTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/monitor/os/EvilOsProbeTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.nio.file.Files; +import java.util.Collections; import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -36,11 +37,21 @@ public class EvilOsProbeTests extends ESTestCase { public void testOsPrettyName() throws IOException { final OsInfo osInfo = OsProbe.getInstance().osInfo(randomLongBetween(1, 100), randomIntBetween(1, 8)); if (Constants.LINUX) { - final List lines = Files.readAllLines(PathUtils.get("/etc/os-release")); + final List lines; + if (Files.exists(PathUtils.get("/etc/os-release"))) { + lines = Files.readAllLines(PathUtils.get("/etc/os-release")); + } else if (Files.exists(PathUtils.get("/usr/lib/os-release"))) { + lines = Files.readAllLines(PathUtils.get("/usr/lib/os-release")); + } else { + lines = Collections.singletonList( + "PRETTY_NAME=\"" + Files.readAllLines(PathUtils.get("/etc/system-release")).get(0) + "\""); + } for (final String line : lines) { if (line != null && line.startsWith("PRETTY_NAME=")) { - final Matcher matcher = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1").matcher(line); - assert matcher.matches() : line; + final Matcher matcher = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1").matcher(line.trim()); + final boolean matches = matcher.matches(); + assert matches : line; + assert matcher.groupCount() == 2 : line; final String prettyName = matcher.group(2); assertThat(osInfo.getPrettyName(), equalTo(prettyName)); return; diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 3e54326a6c7..00000000000 --- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6bb87c96d76cdc70be77261d39376613b0a8860c \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..4483e834c28 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +ee88dcf4ea69de2a13df7b76d5524e8fd442f243 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 187572e5251..00000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1b29b3e3b080ec32073c007a1940e5aa7b195316 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..bbdce45b149 --- /dev/null +++ 
b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +ec090fd8bd804775aa128ccb20467b062b72d625 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 68553b80b1a..00000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3757a90f73f505d40e6e200d1bacbff897f67548 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..cbdbd1acdc7 --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +0bba71a2e8bfd1c15db407ff06ee4185a091d5ec \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 75c05f55ed8..00000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c918cc5ac54e5a4dba4740e9e45a93ebd3c95c77 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..6c29d732fdd --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +fcee5b1586f7c695c65863ca9ee3a8ebe99c3242 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index afd8b925614..00000000000 --- a/server/licenses/lucene-highlighter-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cff1fa9ac25c840589d9a39a42ed4629b594cf4 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..35cf72f723e --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +0a26a4870e9fddae497be6899fe9a0a2d3002294 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 6b525fa5ea6..00000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a843337e03493ab5f3498b5dd232fa9abb9e765 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..f2c74d8d845 --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +700722c50f8bfcb2d1773b50f43519603961d0ce \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 9487a7fa579..00000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -afda00bbee5fb8b4c36867eabb83267b3b2b8c10 \ No newline at end of file diff --git 
a/server/licenses/lucene-memory-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..a3b8ff82ec2 --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +9c9657903e4ade7773aaaf76f19d96e2a936e42d \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 3e6fe1ce378..00000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2d8bc6a0486cfa6b4de8c1103017b35c0193544 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..f7f354945d0 --- /dev/null +++ b/server/licenses/lucene-misc-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +58ce1753cc41dfe445423c4cee42c129576a2ca2 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index dbb72428046..00000000000 --- a/server/licenses/lucene-queries-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79a3b80245a9cf00f24f5d6e298a8e1a887760f1 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..883c92e08cb --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +bf1ee7b66f6e6349624d8760c00669480460a55d \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index db1d47c8307..00000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -37c9970ec38f64e7ccecbe17efbabdaabe8da2ea \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..98a85a1845f --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +2ed20db0ccc53f966cc211aeb3b623dcf69d2cca \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 0e7ba7aeb9e..00000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7103c3482c728a9788922aa39e39a5ed2bdd3a11 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..a5d5a697df0 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +e06d99480f44eede9302fb7dda3c62f3e8ff68e1 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index bba0f7269e4..00000000000 --- 
a/server/licenses/lucene-spatial-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89d389c1020fac58f462819ad822c9b09e52f563 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..cf1a244c9d3 --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +64ff3b354c21fc371cfeef208158af92cdf93316 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 1d8884aa8f2..00000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b62e34e522f3afa9c3f1655b97b995ff6ba2592d \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..82c8cb37846 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +2dffc0dec40028ca958a0a2fdf0628fd8e8354d0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 1ff50782c17..00000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0c92f6b03eb226586b431a834dca90a1f2cd85b8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..a23c263e7a3 --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +d0ed3d77875bab18abe45706ec8b5d441cf46bdc \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index dd4d9e0665e..00000000000 --- a/server/licenses/lucene-suggest-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a659287ba728f7a0d81694ce32e9ef741a13c19 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-6d9c714052.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..7d48c041d64 --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +8bb05a98bb9c2615ad1262980dd6b07802bafa1d \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 501f6ed59e6..e1d990f0cff 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -19,6 +19,7 @@ package org.elasticsearch.common.geo.parsers; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; @@ -77,7 +78,9 @@ public class GeoWKTParser { 
final GeoShapeFieldMapper shapeMapper) throws IOException, ElasticsearchParseException { try (StringReader reader = new StringReader(parser.text())) { - boolean ignoreZValue = (shapeMapper != null && shapeMapper.ignoreZValue().value() == true); + Explicit ignoreZValue = (shapeMapper == null) ? GeoShapeFieldMapper.Defaults.IGNORE_Z_VALUE : + shapeMapper.ignoreZValue(); + Explicit coerce = (shapeMapper == null) ? GeoShapeFieldMapper.Defaults.COERCE : shapeMapper.coerce(); // setup the tokenizer; configured to read words w/o numbers StreamTokenizer tokenizer = new StreamTokenizer(reader); tokenizer.resetSyntax(); @@ -90,14 +93,15 @@ public class GeoWKTParser { tokenizer.wordChars('.', '.'); tokenizer.whitespaceChars(0, ' '); tokenizer.commentChar('#'); - ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue); + ShapeBuilder builder = parseGeometry(tokenizer, shapeType, ignoreZValue.value(), coerce.value()); checkEOF(tokenizer); return builder; } } /** parse geometry from the stream tokenizer */ - private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue) + private static ShapeBuilder parseGeometry(StreamTokenizer stream, GeoShapeType shapeType, final boolean ignoreZValue, + final boolean coerce) throws IOException, ElasticsearchParseException { final GeoShapeType type = GeoShapeType.forName(nextWord(stream)); if (shapeType != null && shapeType != GeoShapeType.GEOMETRYCOLLECTION) { @@ -107,21 +111,21 @@ public class GeoWKTParser { } switch (type) { case POINT: - return parsePoint(stream, ignoreZValue); + return parsePoint(stream, ignoreZValue, coerce); case MULTIPOINT: - return parseMultiPoint(stream, ignoreZValue); + return parseMultiPoint(stream, ignoreZValue, coerce); case LINESTRING: - return parseLine(stream, ignoreZValue); + return parseLine(stream, ignoreZValue, coerce); case MULTILINESTRING: - return parseMultiLine(stream, ignoreZValue); + return parseMultiLine(stream, ignoreZValue, coerce); case POLYGON: - return parsePolygon(stream, ignoreZValue); + return parsePolygon(stream, ignoreZValue, coerce); case MULTIPOLYGON: - return parseMultiPolygon(stream, ignoreZValue); + return parseMultiPolygon(stream, ignoreZValue, coerce); case ENVELOPE: return parseBBox(stream); case GEOMETRYCOLLECTION: - return parseGeometryCollection(stream, ignoreZValue); + return parseGeometryCollection(stream, ignoreZValue, coerce); default: throw new IllegalArgumentException("Unknown geometry type: " + type); } @@ -142,7 +146,7 @@ public class GeoWKTParser { return new EnvelopeBuilder(new Coordinate(minLon, maxLat), new Coordinate(maxLon, minLat)); } - private static PointBuilder parsePoint(StreamTokenizer stream, final boolean ignoreZValue) + private static PointBuilder parsePoint(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; @@ -155,12 +159,12 @@ public class GeoWKTParser { return pt; } - private static List parseCoordinateList(StreamTokenizer stream, final boolean ignoreZValue) + private static List parseCoordinateList(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { CoordinatesBuilder coordinates = new CoordinatesBuilder(); boolean isOpenParen = false; if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) { - coordinates.coordinate(parseCoordinate(stream, ignoreZValue)); + 
coordinates.coordinate(parseCoordinate(stream, ignoreZValue, coerce)); } if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) { @@ -170,7 +174,7 @@ public class GeoWKTParser { while (nextCloserOrComma(stream).equals(COMMA)) { isOpenParen = false; if (isNumberNext(stream) || (isOpenParen = nextWord(stream).equals(LPAREN))) { - coordinates.coordinate(parseCoordinate(stream, ignoreZValue)); + coordinates.coordinate(parseCoordinate(stream, ignoreZValue, coerce)); } if (isOpenParen && nextCloser(stream).equals(RPAREN) == false) { throw new ElasticsearchParseException("expected: " + RPAREN + " but found: " + tokenString(stream), stream.lineno()); @@ -179,7 +183,7 @@ public class GeoWKTParser { return coordinates.build(); } - private static Coordinate parseCoordinate(StreamTokenizer stream, final boolean ignoreZValue) + private static Coordinate parseCoordinate(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { final double lon = nextNumber(stream); final double lat = nextNumber(stream); @@ -190,71 +194,98 @@ public class GeoWKTParser { return z == null ? new Coordinate(lon, lat) : new Coordinate(lon, lat, z); } - private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream, final boolean ignoreZValue) + private static MultiPointBuilder parseMultiPoint(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } - return new MultiPointBuilder(parseCoordinateList(stream, ignoreZValue)); + return new MultiPointBuilder(parseCoordinateList(stream, ignoreZValue, coerce)); } - private static LineStringBuilder parseLine(StreamTokenizer stream, final boolean ignoreZValue) + private static LineStringBuilder parseLine(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } - return new LineStringBuilder(parseCoordinateList(stream, ignoreZValue)); + return new LineStringBuilder(parseCoordinateList(stream, ignoreZValue, coerce)); } - private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream, final boolean ignoreZValue) + // A LinearRing is closed LineString with 4 or more positions. The first and last positions + // are equivalent (they represent equivalent points). + private static LineStringBuilder parseLinearRing(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) + throws IOException, ElasticsearchParseException { + String token = nextEmptyOrOpen(stream); + if (token.equals(EMPTY)) { + return null; + } + List coordinates = parseCoordinateList(stream, ignoreZValue, coerce); + int coordinatesNeeded = coerce ? 
3 : 4; + if (coordinates.size() >= coordinatesNeeded) { + if (!coordinates.get(0).equals(coordinates.get(coordinates.size() - 1))) { + if (coerce == true) { + coordinates.add(coordinates.get(0)); + } else { + throw new ElasticsearchParseException("invalid LinearRing found (coordinates are not closed)"); + } + } + } + if (coordinates.size() < 4) { + throw new ElasticsearchParseException("invalid number of points in LinearRing (found [{}] - must be >= 4)", + coordinates.size()); + } + return new LineStringBuilder(coordinates); + } + + private static MultiLineStringBuilder parseMultiLine(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { String token = nextEmptyOrOpen(stream); if (token.equals(EMPTY)) { return null; } MultiLineStringBuilder builder = new MultiLineStringBuilder(); - builder.linestring(parseLine(stream, ignoreZValue)); + builder.linestring(parseLine(stream, ignoreZValue, coerce)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.linestring(parseLine(stream, ignoreZValue)); + builder.linestring(parseLine(stream, ignoreZValue, coerce)); } return builder; } - private static PolygonBuilder parsePolygon(StreamTokenizer stream, final boolean ignoreZValue) + private static PolygonBuilder parsePolygon(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - PolygonBuilder builder = new PolygonBuilder(parseLine(stream, ignoreZValue), ShapeBuilder.Orientation.RIGHT); + PolygonBuilder builder = new PolygonBuilder(parseLinearRing(stream, ignoreZValue, coerce), ShapeBuilder.Orientation.RIGHT); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.hole(parseLine(stream, ignoreZValue)); + builder.hole(parseLinearRing(stream, ignoreZValue, coerce)); } return builder; } - private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream, final boolean ignoreZValue) + private static MultiPolygonBuilder parseMultiPolygon(StreamTokenizer stream, final boolean ignoreZValue, final boolean coerce) throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } - MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream, ignoreZValue)); + MultiPolygonBuilder builder = new MultiPolygonBuilder().polygon(parsePolygon(stream, ignoreZValue, coerce)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.polygon(parsePolygon(stream, ignoreZValue)); + builder.polygon(parsePolygon(stream, ignoreZValue, coerce)); } return builder; } - private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream, final boolean ignoreZValue) + private static GeometryCollectionBuilder parseGeometryCollection(StreamTokenizer stream, final boolean ignoreZValue, + final boolean coerce) throws IOException, ElasticsearchParseException { if (nextEmptyOrOpen(stream).equals(EMPTY)) { return null; } GeometryCollectionBuilder builder = new GeometryCollectionBuilder().shape( - parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION, ignoreZValue)); + parseGeometry(stream, GeoShapeType.GEOMETRYCOLLECTION, ignoreZValue, coerce)); while (nextCloserOrComma(stream).equals(COMMA)) { - builder.shape(parseGeometry(stream, null, ignoreZValue)); + builder.shape(parseGeometry(stream, null, ignoreZValue, coerce)); } return builder; } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java 
b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 871c510f3de..71929276bb1 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -299,6 +299,7 @@ public final class ClusterSettings extends AbstractScopedSettings { RemoteClusterService.ENABLE_REMOTE_CLUSTERS, RemoteClusterService.SEARCH_ENABLE_REMOTE_CLUSTERS, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, + RemoteClusterService.REMOTE_CLUSTER_COMPRESS, TransportService.TRACE_LOG_EXCLUDE_SETTING, TransportService.TRACE_LOG_INCLUDE_SETTING, TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java index 72f36278783..d2d081a63d0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/GeoShapeFieldMapper.java @@ -135,7 +135,7 @@ public class GeoShapeFieldMapper extends FieldMapper { public Builder coerce(boolean coerce) { this.coerce = coerce; - return builder; + return this; } @Override @@ -155,7 +155,7 @@ public class GeoShapeFieldMapper extends FieldMapper { public Builder ignoreMalformed(boolean ignoreMalformed) { this.ignoreMalformed = ignoreMalformed; - return builder; + return this; } protected Explicit ignoreMalformed(BuilderContext context) { diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java index 06a5aadd229..18173dd275a 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsProbe.java @@ -33,10 +33,13 @@ import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.Collectors; public class OsProbe { @@ -547,16 +550,13 @@ public class OsProbe { final Optional maybePrettyNameLine = prettyNameLines.size() == 1 ? 
Optional.of(prettyNameLines.get(0)) : Optional.empty(); if (maybePrettyNameLine.isPresent()) { - final String prettyNameLine = maybePrettyNameLine.get(); - final String[] prettyNameFields = prettyNameLine.split("="); - assert prettyNameFields.length == 2 : prettyNameLine; - if (prettyNameFields[1].length() >= 3 && - (prettyNameFields[1].startsWith("\"") && prettyNameFields[1].endsWith("\"")) || - (prettyNameFields[1].startsWith("'") && prettyNameFields[1].endsWith("'"))) { - return prettyNameFields[1].substring(1, prettyNameFields[1].length() - 1); - } else { - return prettyNameFields[1]; - } + // we trim since some OSes emit a trailing space; for example, Oracle Linux Server 6.9 has a trailing space after the closing quote + final String trimmedPrettyNameLine = maybePrettyNameLine.get().trim(); + final Matcher matcher = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1").matcher(trimmedPrettyNameLine); + final boolean matches = matcher.matches(); + assert matches : trimmedPrettyNameLine; + assert matcher.groupCount() == 2 : trimmedPrettyNameLine; + return matcher.group(2); } else { return Constants.OS_NAME; } @@ -567,22 +567,33 @@ } /** - * The lines from {@code /etc/os-release} or {@code /usr/lib/os-release} as a fallback. These file represents identification of the - * underlying operating system. The structure of the file is newlines of key-value pairs of shell-compatible variable assignments. + * The lines from {@code /etc/os-release} or {@code /usr/lib/os-release} as a fallback, with an additional fallback to + * {@code /etc/system-release}. These files represent identification of the underlying operating system. Each file consists of + * newline-separated key-value pairs of shell-compatible variable assignments. * - * @return the lines from {@code /etc/os-release} or {@code /usr/lib/os-release} - * @throws IOException if an I/O exception occurs reading {@code /etc/os-release} or {@code /usr/lib/os-release} + * @return the lines from {@code /etc/os-release} or {@code /usr/lib/os-release} or {@code /etc/system-release} + * @throws IOException if an I/O exception occurs reading {@code /etc/os-release} or {@code /usr/lib/os-release} or + * {@code /etc/system-release} */ - @SuppressForbidden(reason = "access /etc/os-release or /usr/lib/os-release") + @SuppressForbidden(reason = "access /etc/os-release or /usr/lib/os-release or /etc/system-release") List<String> readOsRelease() throws IOException { final List<String> lines; if (Files.exists(PathUtils.get("/etc/os-release"))) { lines = Files.readAllLines(PathUtils.get("/etc/os-release")); - } else { + assert lines != null && lines.isEmpty() == false; + return lines; + } else if (Files.exists(PathUtils.get("/usr/lib/os-release"))) { lines = Files.readAllLines(PathUtils.get("/usr/lib/os-release")); + assert lines != null && lines.isEmpty() == false; + return lines; + } else if (Files.exists(PathUtils.get("/etc/system-release"))) { + // fallback for older Red Hat-like OS + lines = Files.readAllLines(PathUtils.get("/etc/system-release")); + assert lines != null && lines.size() == 1; + return Collections.singletonList("PRETTY_NAME=\"" + lines.get(0) + "\""); + } else { + return Collections.emptyList(); } - assert lines != null && lines.isEmpty() == false; - return lines; } public OsStats osStats() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ParsedReverseNested.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ParsedReverseNested.java index dec15c3eded..fafed73370c 100644
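The OsProbe change above swaps the hand-rolled split/substring logic for a single regex in which the backreference \1 forces the closing quote to match the opening one (or both quotes to be absent), while the trim() absorbs trailing whitespace. A minimal, self-contained sketch of how that pattern behaves; the sample lines are invented for illustration:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class PrettyNameRegexDemo {
        // Same pattern as the OsProbe change: group 1 captures the optional quote,
        // group 2 the pretty name, and \1 requires a matching closing quote.
        private static final Pattern PRETTY_NAME = Pattern.compile("PRETTY_NAME=(\"?|'?)?([^\"']+)\\1");

        public static void main(String[] args) {
            String[] samples = {                             // illustrative lines only
                "PRETTY_NAME=\"Oracle Linux Server 6.9\" ",  // trailing space, hence the trim
                "PRETTY_NAME='Ubuntu 18.04.1 LTS'",
                "PRETTY_NAME=Fedora 28 (Server Edition)"     // unquoted values also match
            };
            for (String line : samples) {
                Matcher matcher = PRETTY_NAME.matcher(line.trim());
                if (matcher.matches()) {
                    System.out.println(matcher.group(2));    // prints the unquoted pretty name
                }
            }
        }
    }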
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ParsedReverseNested.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ParsedReverseNested.java @@ -23,7 +23,7 @@ import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregatio import java.io.IOException; -public class ParsedReverseNested extends ParsedSingleBucketAggregation implements Nested { +public class ParsedReverseNested extends ParsedSingleBucketAggregation implements ReverseNested { @Override public String getType() { diff --git a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy index 0df1cf17a84..4df99ef6f88 100644 --- a/server/src/main/resources/org/elasticsearch/bootstrap/security.policy +++ b/server/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -127,6 +127,7 @@ grant { // OS release on Linux permission java.io.FilePermission "/etc/os-release", "read"; permission java.io.FilePermission "/usr/lib/os-release", "read"; + permission java.io.FilePermission "/etc/system-release", "read"; // io stats on Linux permission java.io.FilePermission "/proc/self/mountinfo", "read"; diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 696279ece4b..965ca234ddd 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -318,6 +318,31 @@ public class GeoWKTShapeParserTests extends BaseGeoParsingTestCase { assertEquals(shapeBuilder.numDimensions(), 3); } + public void testParseOpenPolygon() throws IOException { + String openPolygon = "POLYGON ((100 5, 100 10, 90 10, 90 5))"; + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().value(openPolygon); + XContentParser parser = createParser(xContentBuilder); + parser.nextToken(); + + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build(); + + Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); + final GeoShapeFieldMapper defaultMapperBuilder = new GeoShapeFieldMapper.Builder("test").coerce(false).build(mockBuilderContext); + ElasticsearchParseException exception = expectThrows(ElasticsearchParseException.class, + () -> ShapeParser.parse(parser, defaultMapperBuilder)); + assertEquals("invalid LinearRing found (coordinates are not closed)", exception.getMessage()); + + final GeoShapeFieldMapper coercingMapperBuilder = new GeoShapeFieldMapper.Builder("test").coerce(true).build(mockBuilderContext); + ShapeBuilder shapeBuilder = ShapeParser.parse(parser, coercingMapperBuilder); + assertNotNull(shapeBuilder); + assertEquals("polygon ((100.0 5.0, 100.0 10.0, 90.0 10.0, 90.0 5.0, 100.0 5.0))", shapeBuilder.toWKT()); + } + public void testParseSelfCrossingPolygon() throws IOException { // test self crossing ccw poly not crossing dateline List shellCoordinates = new ArrayList<>(); diff --git a/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java index 4e94e135238..f76ac728948 100644 --- 
a/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/os/OsProbeTests.java @@ -27,6 +27,7 @@ import java.math.BigInteger; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Locale; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; @@ -55,12 +56,10 @@ public class OsProbeTests extends ESTestCase { List readOsRelease() throws IOException { assert Constants.LINUX : Constants.OS_NAME; if (prettyName != null) { - final String quote = randomFrom("\"", "'", null); - if (quote == null) { - return Arrays.asList("NAME=" + randomAlphaOfLength(16), "PRETTY_NAME=" + prettyName); - } else { - return Arrays.asList("NAME=" + randomAlphaOfLength(16), "PRETTY_NAME=" + quote + prettyName + quote); - } + final String quote = randomFrom("\"", "'", ""); + final String space = randomFrom(" ", ""); + final String prettyNameLine = String.format(Locale.ROOT, "PRETTY_NAME=%s%s%s%s", quote, prettyName, quote, space); + return Arrays.asList("NAME=" + randomAlphaOfLength(16), prettyNameLine); } else { return Collections.singletonList("NAME=" + randomAlphaOfLength(16)); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNestedTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNestedTests.java index a43e0dd519f..039ef8fd129 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNestedTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNestedTests.java @@ -22,9 +22,11 @@ package org.elasticsearch.search.aggregations.bucket.nested; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase; +import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import java.io.IOException; import java.util.List; import java.util.Map; @@ -49,4 +51,10 @@ public class InternalNestedTests extends InternalSingleBucketAggregationTestCase protected Class implementationClass() { return ParsedNested.class; } + + @Override + protected void assertFromXContent(InternalNested aggregation, ParsedAggregation parsedAggregation) throws IOException { + super.assertFromXContent(aggregation, parsedAggregation); + assertTrue(parsedAggregation instanceof Nested); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNestedTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNestedTests.java index ddfe10b27eb..6031d435adc 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNestedTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNestedTests.java @@ -22,9 +22,11 @@ package org.elasticsearch.search.aggregations.bucket.nested; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalSingleBucketAggregationTestCase; +import org.elasticsearch.search.aggregations.ParsedAggregation; import 
org.elasticsearch.search.aggregations.bucket.ParsedSingleBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import java.io.IOException; import java.util.List; import java.util.Map; @@ -49,4 +51,10 @@ public class InternalReverseNestedTests extends InternalSingleBucketAggregationT protected Class implementationClass() { return ParsedReverseNested.class; } + + @Override + protected void assertFromXContent(InternalReverseNested aggregation, ParsedAggregation parsedAggregation) throws IOException { + super.assertFromXContent(aggregation, parsedAggregation); + assertTrue(parsedAggregation instanceof ReverseNested); + } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index e2a0827c14d..84207156033 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -99,6 +99,7 @@ public class RemoteClusterServiceTests extends ESTestCase { assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING)); assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_NODE_ATTRIBUTE)); assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE)); + assertTrue(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS.contains(RemoteClusterService.REMOTE_CLUSTER_COMPRESS)); } public void testRemoteClusterSeedSetting() { diff --git a/x-pack/plugin/ccr/qa/multi-cluster-downgraded-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster-downgraded-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java index c40571fe1d2..400006f5c02 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-downgraded-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-downgraded-to-basic-license/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -28,7 +28,6 @@ import static org.hamcrest.core.Is.is; public class FollowIndexIT extends ESCCRRestTestCase { public void testDowngradeRemoteClusterToBasic() throws Exception { - assumeFalse("windows is the worst", Constants.WINDOWS); if ("follow".equals(targetCluster) == false) { return; } @@ -78,27 +77,30 @@ public class FollowIndexIT extends ESCCRRestTestCase { assertThat(indexExists(index2), is(false)); // parse the logs and ensure that the auto-coordinator skipped coordination on the leader cluster - assertBusy(() -> { - final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); - final Iterator it = lines.iterator(); - boolean warn = false; - while (it.hasNext()) { - final String line = it.next(); - if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " + - "failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) { - warn = true; - break; + // (does not work on windows...) 
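This FollowIndexIT hunk replaces a blanket assumeFalse skip with a platform guard, so everything except the log-file scraping still runs on Windows. A minimal, self-contained sketch of the pattern; the class and messages are illustrative (the real test checks org.apache.lucene.util.Constants.WINDOWS):

    public class PlatformGuardDemo {
        // Stand-in for Constants.WINDOWS from the real test.
        static final boolean WINDOWS = System.getProperty("os.name").startsWith("Windows");

        public static void main(String[] args) {
            // The portable assertions always run, instead of the whole test being skipped.
            System.out.println("portable assertions ran");
            if (WINDOWS == false) {
                // Only the platform-sensitive log-file checks are made conditional.
                System.out.println("log-file assertions ran");
            }
        }
    }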
+ if (Constants.WINDOWS == false) { + assertBusy(() -> { + final List lines = Files.readAllLines(PathUtils.get(System.getProperty("log"))); + final Iterator it = lines.iterator(); + boolean warn = false; + while (it.hasNext()) { + final String line = it.next(); + if (line.matches(".*\\[WARN\\s*\\]\\[o\\.e\\.x\\.c\\.a\\.AutoFollowCoordinator\\s*\\] \\[node-0\\] " + + "failure occurred while fetching cluster state for auto follow pattern \\[test_pattern\\]")) { + warn = true; + break; + } } - } - assertTrue(warn); - assertTrue(it.hasNext()); - final String lineAfterWarn = it.next(); - assertThat( - lineAfterWarn, - equalTo("org.elasticsearch.ElasticsearchStatusException: " + - "can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " + - "the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); - }); + assertTrue(warn); + assertTrue(it.hasNext()); + final String lineAfterWarn = it.next(); + assertThat( + lineAfterWarn, + equalTo("org.elasticsearch.ElasticsearchStatusException: " + + "can not fetch remote cluster state as the remote cluster [leader_cluster] is not licensed for [ccr]; " + + "the license mode [BASIC] on cluster [leader_cluster] does not enable [ccr]")); + }); + } }); // Manually following index2 also does not work after the downgrade: diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java index 943f85010e2..2407344b04d 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -25,7 +25,7 @@ public class RestCcrStatsAction extends BaseRestHandler { @Override public String getName() { - return "ccr_auto_follow_stats"; + return "ccr_stats"; } @Override diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java index 15b73292976..8da8b66d8c2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java @@ -26,7 +26,7 @@ public class RestFollowStatsAction extends BaseRestHandler { @Override public String getName() { - return "ccr_stats"; + return "ccr_follower_stats"; } @Override diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java index 94609128c94..82fe66c0e2d 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/indexlifecycle/TimeSeriesLifecycleActionsIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.xpack.core.indexlifecycle.Phase; import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; +import org.elasticsearch.xpack.core.indexlifecycle.ShrinkStep; import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import 
org.elasticsearch.xpack.core.indexlifecycle.TerminalPolicyStep; import org.junit.Before; @@ -178,6 +179,41 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase { assertBusy(() -> assertFalse(indexExists(shrunkenOriginalIndex))); } + public void testRetryFailedShrinkAction() throws Exception { + int numShards = 6; + int divisor = randomFrom(2, 3, 6); + int expectedFinalShards = numShards / divisor; + String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index; + createIndexWithSettings(index, Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + createNewSingletonPolicy("warm", new ShrinkAction(numShards + randomIntBetween(1, numShards))); + updatePolicy(index, policy); + assertBusy(() -> { + String failedStep = getFailedStepForIndex(index); + assertThat(failedStep, equalTo(ShrinkStep.NAME)); + }); + + // update policy to be correct + createNewSingletonPolicy("warm", new ShrinkAction(expectedFinalShards)); + updatePolicy(index, policy); + + // retry step + Request retryRequest = new Request("POST", index + "/_ilm/retry"); + assertOK(client().performRequest(retryRequest)); + + // assert corrected policy is picked up and index is shrunken + assertBusy(() -> { + logger.error(explainIndex(index)); + assertTrue(indexExists(shrunkenIndex)); + assertTrue(aliasExists(shrunkenIndex, index)); + Map settings = getOnlyIndexSettings(shrunkenIndex); + assertThat(getStepKeyForIndex(shrunkenIndex), equalTo(TerminalPolicyStep.KEY)); + assertThat(settings.get(IndexMetaData.SETTING_NUMBER_OF_SHARDS), equalTo(String.valueOf(expectedFinalShards))); + assertThat(settings.get(IndexMetaData.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true")); + }); + expectThrows(ResponseException.class, this::indexDocument); + } + public void testRolloverAction() throws Exception { String originalIndex = index + "-000001"; String secondIndex = index + "-000002"; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java index 9e5ef7b01c5..70aa9af2c72 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/ExecuteStepsUpdateTask.java @@ -101,7 +101,7 @@ public class ExecuteStepsUpdateTask extends ClusterStateUpdateTask { return state; } else { state = IndexLifecycleRunner.moveClusterStateToNextStep(index, state, currentStep.getKey(), - currentStep.getNextStepKey(), nowSupplier); + currentStep.getNextStepKey(), nowSupplier, false); } } else { // cluster state wait step so evaluate the @@ -125,7 +125,7 @@ public class ExecuteStepsUpdateTask extends ClusterStateUpdateTask { return state; } else { state = IndexLifecycleRunner.moveClusterStateToNextStep(index, state, currentStep.getKey(), - currentStep.getNextStepKey(), nowSupplier); + currentStep.getNextStepKey(), nowSupplier, false); } } else { logger.trace("[{}] condition not met ({}) [{}], returning existing state", diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java index f5e33fdb980..2f7947bb517 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java +++ 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleRunner.java @@ -271,11 +271,13 @@ public class IndexLifecycleRunner { * @param nextStepKey The next step to move the index into * @param nowSupplier The current-time supplier for updating when steps changed * @param stepRegistry The steps registry to check a step-key's existence in the index's current policy + * @param forcePhaseDefinitionRefresh When true, step information will be recompiled from the latest version of the + * policy. Otherwise, the existing phase definition is used. * @return The updated cluster state where the index moved to nextStepKey */ static ClusterState moveClusterStateToStep(String indexName, ClusterState currentState, StepKey currentStepKey, StepKey nextStepKey, LongSupplier nowSupplier, - PolicyStepsRegistry stepRegistry) { + PolicyStepsRegistry stepRegistry, boolean forcePhaseDefinitionRefresh) { IndexMetaData idxMeta = currentState.getMetaData().index(indexName); Settings indexSettings = idxMeta.getSettings(); String indexPolicySetting = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings); @@ -295,18 +297,19 @@ "] with policy [" + indexPolicySetting + "] does not exist"); } - return IndexLifecycleRunner.moveClusterStateToNextStep(idxMeta.getIndex(), currentState, currentStepKey, nextStepKey, nowSupplier); + return IndexLifecycleRunner.moveClusterStateToNextStep(idxMeta.getIndex(), currentState, currentStepKey, + nextStepKey, nowSupplier, forcePhaseDefinitionRefresh); } static ClusterState moveClusterStateToNextStep(Index index, ClusterState clusterState, StepKey currentStep, StepKey nextStep, - LongSupplier nowSupplier) { + LongSupplier nowSupplier, boolean forcePhaseDefinitionRefresh) { IndexMetaData idxMeta = clusterState.getMetaData().index(index); IndexLifecycleMetadata ilmMeta = clusterState.metaData().custom(IndexLifecycleMetadata.TYPE); LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas() .get(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(idxMeta.getSettings())); LifecycleExecutionState lifecycleState = LifecycleExecutionState.fromIndexMetadata(idxMeta); LifecycleExecutionState newLifecycleState = moveExecutionStateToNextStep(policyMetadata, - lifecycleState, currentStep, nextStep, nowSupplier); + lifecycleState, currentStep, nextStep, nowSupplier, forcePhaseDefinitionRefresh); ClusterState.Builder newClusterStateBuilder = newClusterStateWithLifecycleState(index, clusterState, newLifecycleState); return newClusterStateBuilder.build(); @@ -324,7 +327,7 @@ causeXContentBuilder.endObject(); LifecycleExecutionState nextStepState = moveExecutionStateToNextStep(policyMetadata, LifecycleExecutionState.fromIndexMetadata(idxMeta), currentStep, new StepKey(currentStep.getPhase(), - currentStep.getAction(), ErrorStep.NAME), nowSupplier); + currentStep.getAction(), ErrorStep.NAME), nowSupplier, false); LifecycleExecutionState.Builder failedState = LifecycleExecutionState.builder(nextStepState); failedState.setFailedStep(currentStep.getName()); failedState.setStepInfo(BytesReference.bytes(causeXContentBuilder).utf8ToString()); @@ -343,9 +346,9 @@ StepKey currentStepKey = IndexLifecycleRunner.getCurrentStepKey(lifecycleState); String failedStep = lifecycleState.getFailedStep(); if (currentStepKey != null && ErrorStep.NAME.equals(currentStepKey.getName()) - && Strings.isNullOrEmpty(failedStep) == false) { + && Strings.isNullOrEmpty(failedStep) == false) {
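The plumbing in this hunk only threads a new boolean through the existing move-to-step calls; the behavior change lands in the phase-definition condition shown a little further below, where the cached definition is rebuilt either on a phase boundary or when a refresh is forced. A minimal, self-contained sketch of that decision (method and class names here are illustrative, not the real ILM API):

    public class PhaseRefreshDemo {
        // Mirrors the new condition in moveExecutionStateToNextStep: recompile the
        // cached phase definition when crossing a phase boundary, or when a retry
        // explicitly forces a refresh so a corrected policy is picked up.
        static boolean shouldRefreshPhaseDefinition(String currentPhase, String nextPhase, boolean forceRefresh) {
            return currentPhase.equals(nextPhase) == false || forceRefresh;
        }

        public static void main(String[] args) {
            System.out.println(shouldRefreshPhaseDefinition("warm", "warm", false)); // false: same phase, no force
            System.out.println(shouldRefreshPhaseDefinition("warm", "cold", false)); // true: phase boundary crossed
            System.out.println(shouldRefreshPhaseDefinition("warm", "warm", true));  // true: retry forces a refresh
        }
    }

In the patch itself, only the retry path passes true; every other caller keeps the previous behavior by passing false.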
StepKey nextStepKey = new StepKey(currentStepKey.getPhase(), currentStepKey.getAction(), failedStep); - newState = moveClusterStateToStep(index, currentState, currentStepKey, nextStepKey, nowSupplier, stepRegistry); + newState = moveClusterStateToStep(index, currentState, currentStepKey, nextStepKey, nowSupplier, stepRegistry, true); } else { throw new IllegalArgumentException("cannot retry an action for an index [" + index + "] that has not encountered an error when running a Lifecycle Policy"); @@ -357,7 +360,8 @@ public class IndexLifecycleRunner { private static LifecycleExecutionState moveExecutionStateToNextStep(LifecyclePolicyMetadata policyMetadata, LifecycleExecutionState existingState, StepKey currentStep, StepKey nextStep, - LongSupplier nowSupplier) { + LongSupplier nowSupplier, + boolean forcePhaseDefinitionRefresh) { long nowAsMillis = nowSupplier.getAsLong(); LifecycleExecutionState.Builder updatedState = LifecycleExecutionState.builder(existingState); updatedState.setPhase(nextStep.getPhase()); @@ -369,7 +373,7 @@ public class IndexLifecycleRunner { updatedState.setFailedStep(null); updatedState.setStepInfo(null); - if (currentStep.getPhase().equals(nextStep.getPhase()) == false) { + if (currentStep.getPhase().equals(nextStep.getPhase()) == false || forcePhaseDefinitionRefresh) { final String newPhaseDefinition; final Phase nextPhase; if ("new".equals(nextStep.getPhase()) || TerminalPolicyStep.KEY.equals(nextStep)) { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java index 637c1855dd0..b77997a94a3 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleService.java @@ -84,7 +84,7 @@ public class IndexLifecycleService public ClusterState moveClusterStateToStep(ClusterState currentState, String indexName, StepKey currentStepKey, StepKey nextStepKey) { return IndexLifecycleRunner.moveClusterStateToStep(indexName, currentState, currentStepKey, nextStepKey, - nowSupplier, policyRegistry); + nowSupplier, policyRegistry, false); } public ClusterState moveClusterStateToFailedStep(ClusterState currentState, String[] indices) { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java index 750fd1af5da..246cda6192f 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/MoveToNextStepUpdateTask.java @@ -68,7 +68,7 @@ public class MoveToNextStepUpdateTask extends ClusterStateUpdateTask { if (policy.equals(LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexSettings)) && currentStepKey.equals(IndexLifecycleRunner.getCurrentStepKey(indexILMData))) { logger.trace("moving [{}] to next step ({})", index.getName(), nextStepKey); - return IndexLifecycleRunner.moveClusterStateToNextStep(index, currentState, currentStepKey, nextStepKey, nowSupplier); + return IndexLifecycleRunner.moveClusterStateToNextStep(index, currentState, currentStepKey, nextStepKey, nowSupplier, false); } else { // either the policy has changed or the step is now // not the same as when we submitted the update task. 
In diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java index b0f25ed7951..c9fabe82257 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/indexlifecycle/action/TransportRetryAction.java @@ -13,11 +13,14 @@ import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.indexlifecycle.LifecycleExecutionState; +import org.elasticsearch.xpack.core.indexlifecycle.Step.StepKey; import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction; import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Request; import org.elasticsearch.xpack.core.indexlifecycle.action.RetryAction.Response; @@ -55,6 +58,22 @@ public class TransportRetryAction extends TransportMasterNodeAction now); + () -> now, false); assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); @@ -684,7 +684,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); index = clusterState.metaData().index(indexName).getIndex(); - newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now, false); assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); } @@ -698,7 +698,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { Collections.emptyList()); Index index = clusterState.metaData().index(indexName).getIndex(); ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, - () -> now); + () -> now, false); assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); @@ -711,7 +711,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); index = clusterState.metaData().index(indexName).getIndex(); - newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now, false); assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); } @@ -725,7 +725,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { 
Collections.emptyList()); Index index = clusterState.metaData().index(indexName).getIndex(); ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, - () -> now); + () -> now, false); assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); @@ -737,7 +737,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { } clusterState = buildClusterState(indexName, Settings.builder(), lifecycleState.build(), Collections.emptyList()); index = clusterState.metaData().index(indexName).getIndex(); - newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now); + newClusterState = IndexLifecycleRunner.moveClusterStateToNextStep(index, clusterState, currentStep, nextStep, () -> now, false); assertClusterStateOnNextStep(clusterState, index, currentStep, nextStep, newClusterState, now); } @@ -764,7 +764,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), policyMetadatas); Index index = clusterState.metaData().index(indexName).getIndex(); ClusterState newClusterState = IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, - nextStepKey, () -> now, stepRegistry); + nextStepKey, () -> now, stepRegistry, false); assertClusterStateOnNextStep(clusterState, index, currentStepKey, nextStepKey, newClusterState, now); } @@ -786,7 +786,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, - nextStepKey, () -> now, stepRegistry)); + nextStepKey, () -> now, stepRegistry, false)); assertThat(exception.getMessage(), equalTo("index [my_index] is not associated with an Index Lifecycle Policy")); } @@ -809,7 +809,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, notCurrentStepKey, - nextStepKey, () -> now, stepRegistry)); + nextStepKey, () -> now, stepRegistry, false)); assertThat(exception.getMessage(), equalTo("index [my_index] is not on current step " + "[{\"phase\":\"not_current_phase\",\"action\":\"not_current_action\",\"name\":\"not_current_step\"}]")); } @@ -832,7 +832,7 @@ public class IndexLifecycleRunnerTests extends ESTestCase { ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> IndexLifecycleRunner.moveClusterStateToStep(indexName, clusterState, currentStepKey, - nextStepKey, () -> now, stepRegistry)); + nextStepKey, () -> now, stepRegistry, false)); assertThat(exception.getMessage(), equalTo("step [{\"phase\":\"next_phase\",\"action\":\"next_action\",\"name\":\"next_step\"}] " + "for index [my_index] with policy [my_policy] does not exist")); @@ 
-866,18 +866,26 @@ public class IndexLifecycleRunnerTests extends ESTestCase { String[] indices = new String[] { indexName }; String policyName = "my_policy"; long now = randomNonNegativeLong(); - StepKey failedStepKey = new StepKey("current_phase", "current_action", "current_step"); + StepKey failedStepKey = new StepKey("current_phase", MockAction.NAME, "current_step"); StepKey errorStepKey = new StepKey(failedStepKey.getPhase(), failedStepKey.getAction(), ErrorStep.NAME); Step step = new MockStep(failedStepKey, null); + LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong()); + PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step, indexName); Settings.Builder indexSettingsBuilder = Settings.builder() .put(LifecycleSettings.LIFECYCLE_NAME, policyName); LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); lifecycleState.setPhase(errorStepKey.getPhase()); + lifecycleState.setPhaseTime(now); lifecycleState.setAction(errorStepKey.getAction()); + lifecycleState.setActionTime(now); lifecycleState.setStep(errorStepKey.getName()); + lifecycleState.setStepTime(now); lifecycleState.setFailedStep(failedStepKey.getName()); - ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), Collections.emptyList()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, lifecycleState.build(), + Collections.singletonList(policyMetadata)); Index index = clusterState.metaData().index(indexName).getIndex(); IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); ClusterState nextClusterState = runner.moveClusterStateToFailedStep(clusterState, indices); @@ -885,6 +893,41 @@ public class IndexLifecycleRunnerTests extends ESTestCase { nextClusterState, now); } + public void testMoveClusterStateToFailedStepWithUnknownStep() { + String indexName = "my_index"; + String[] indices = new String[] { indexName }; + String policyName = "my_policy"; + long now = randomNonNegativeLong(); + StepKey failedStepKey = new StepKey("current_phase", MockAction.NAME, "current_step"); + StepKey errorStepKey = new StepKey(failedStepKey.getPhase(), failedStepKey.getAction(), ErrorStep.NAME); + + StepKey registeredStepKey = new StepKey(randomFrom(failedStepKey.getPhase(), "other"), + MockAction.NAME, "different_step"); + Step step = new MockStep(registeredStepKey, null); + LifecyclePolicy policy = createPolicy(policyName, failedStepKey, null); + LifecyclePolicyMetadata policyMetadata = new LifecyclePolicyMetadata(policy, Collections.emptyMap(), + randomNonNegativeLong(), randomNonNegativeLong()); + + PolicyStepsRegistry policyRegistry = createOneStepPolicyStepRegistry(policyName, step, indexName); + Settings.Builder indexSettingsBuilder = Settings.builder() + .put(LifecycleSettings.LIFECYCLE_NAME, policyName); + LifecycleExecutionState.Builder lifecycleState = LifecycleExecutionState.builder(); + lifecycleState.setPhase(errorStepKey.getPhase()); + lifecycleState.setPhaseTime(now); + lifecycleState.setAction(errorStepKey.getAction()); + lifecycleState.setActionTime(now); + lifecycleState.setStep(errorStepKey.getName()); + lifecycleState.setStepTime(now); + lifecycleState.setFailedStep(failedStepKey.getName()); + ClusterState clusterState = buildClusterState(indexName, indexSettingsBuilder, 
lifecycleState.build(), + Collections.singletonList(policyMetadata)); + IndexLifecycleRunner runner = new IndexLifecycleRunner(policyRegistry, null, () -> now); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> runner.moveClusterStateToFailedStep(clusterState, indices)); + assertThat(exception.getMessage(), equalTo("step [" + failedStepKey + + "] for index [my_index] with policy [my_policy] does not exist")); + } + public void testMoveClusterStateToFailedStepIndexNotFound() { String existingIndexName = "my_index"; String invalidIndexName = "does_not_exist"; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java index de938d0a951..93d440b79d4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/filestructurefinder/DelimitedFileStructureFinder.java @@ -82,7 +82,7 @@ public class DelimitedFileStructureFinder implements FileStructureFinder { int lineNumber = lineNumbers.get(index); Map sampleRecord = new LinkedHashMap<>(); Util.filterListToMap(sampleRecord, columnNames, - trimFields ? row.stream().map(String::trim).collect(Collectors.toList()) : row); + trimFields ? row.stream().map(field -> (field == null) ? null : field.trim()).collect(Collectors.toList()) : row); sampleRecords.add(sampleRecord); sampleMessages.add( sampleLines.subList(prevMessageEndLineNumber + 1, lineNumbers.get(index)).stream().collect(Collectors.joining("\n"))); diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 deleted file mode 100644 index 68553b80b1a..00000000000 --- a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-31d7dfe6b1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3757a90f73f505d40e6e200d1bacbff897f67548 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-6d9c714052.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-6d9c714052.jar.sha1 new file mode 100644 index 00000000000..cbdbd1acdc7 --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-6d9c714052.jar.sha1 @@ -0,0 +1 @@ +0bba71a2e8bfd1c15db407ff06ee4185a091d5ec \ No newline at end of file diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java index ebd44a44abc..c297604fb23 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/arithmetic/Neg.java @@ -11,7 +11,6 @@ import org.elasticsearch.xpack.sql.expression.Expressions.ParamOrdinal; import org.elasticsearch.xpack.sql.expression.NamedExpression; import org.elasticsearch.xpack.sql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.processor.Processor; -import org.elasticsearch.xpack.sql.expression.gen.script.ScriptWeaver; import org.elasticsearch.xpack.sql.expression.gen.script.Scripts; import 
org.elasticsearch.xpack.sql.expression.predicate.operator.arithmetic.UnaryArithmeticProcessor.UnaryArithmeticOperation; import org.elasticsearch.xpack.sql.tree.Location; @@ -21,7 +20,7 @@ import org.elasticsearch.xpack.sql.type.DataType; /** * Negation function (@{code -x}). */ -public class Neg extends UnaryScalarFunction implements ScriptWeaver { +public class Neg extends UnaryScalarFunction { public Neg(Location location, Expression field) { super(location, field); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java index 42c28016e4b..329dc307da8 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/operator/comparison/In.java @@ -5,15 +5,12 @@ */ package org.elasticsearch.xpack.sql.expression.predicate.operator.comparison; -import org.elasticsearch.xpack.sql.expression.Attribute; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.Expressions; import org.elasticsearch.xpack.sql.expression.Foldables; -import org.elasticsearch.xpack.sql.expression.NamedExpression; -import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunctionAttribute; +import org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; -import org.elasticsearch.xpack.sql.expression.gen.script.ScriptWeaver; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.NodeInfo; import org.elasticsearch.xpack.sql.type.DataType; @@ -29,14 +26,13 @@ import java.util.stream.Collectors; import static org.elasticsearch.xpack.sql.expression.gen.script.ParamsBuilder.paramsBuilder; -public class In extends NamedExpression implements ScriptWeaver { +public class In extends ScalarFunction { private final Expression value; private final List list; - private Attribute lazyAttribute; public In(Location location, Expression value, List list) { - super(location, null, CollectionUtils.combine(list, value), null); + super(location, CollectionUtils.combine(list, value)); this.value = value; this.list = new ArrayList<>(new LinkedHashSet<>(list)); } @@ -95,15 +91,6 @@ public class In extends NamedExpression implements ScriptWeaver { return Expressions.name(value) + sj.toString(); } - @Override - public Attribute toAttribute() { - if (lazyAttribute == null) { - lazyAttribute = new ScalarFunctionAttribute(location(), name(), dataType(), null, - false, id(), false, "IN", asScript(), null, asPipe()); - } - return lazyAttribute; - } - @Override public ScriptTemplate asScript() { ScriptTemplate leftScript = asScript(value); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java index f58d290d0e8..2713a01816c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/planner/QueryTranslator.java @@ -30,8 +30,6 @@ import org.elasticsearch.xpack.sql.expression.function.aggregate.Sum; import 
org.elasticsearch.xpack.sql.expression.function.scalar.ScalarFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeFunction; import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeHistogramFunction; -import org.elasticsearch.xpack.sql.expression.gen.script.ScriptTemplate; -import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.sql.expression.predicate.Range; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MatchQueryPredicate; import org.elasticsearch.xpack.sql.expression.predicate.fulltext.MultiMatchQueryPredicate; @@ -40,6 +38,7 @@ import org.elasticsearch.xpack.sql.expression.predicate.logical.And; import org.elasticsearch.xpack.sql.expression.predicate.logical.Not; import org.elasticsearch.xpack.sql.expression.predicate.logical.Or; import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.sql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.sql.expression.predicate.operator.comparison.GreaterThan; @@ -94,6 +93,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Optional; +import java.util.function.Supplier; import static java.util.Collections.singletonList; import static org.elasticsearch.xpack.sql.expression.Foldables.doubleValuesOf; @@ -487,11 +487,8 @@ final class QueryTranslator { if (onAggs) { aggFilter = new AggFilter(not.id().toString(), not.asScript()); } else { - query = new NotQuery(not.location(), toQuery(not.field(), false).query); - // query directly on the field - if (not.field() instanceof FieldAttribute) { - query = wrapIfNested(query, not.field()); - } + query = handleQuery(not, not.field(), + () -> new NotQuery(not.location(), toQuery(not.field(), false).query)); } return new QueryTranslation(query, aggFilter); @@ -508,11 +505,8 @@ final class QueryTranslator { if (onAggs) { aggFilter = new AggFilter(isNotNull.id().toString(), isNotNull.asScript()); } else { - query = new ExistsQuery(isNotNull.location(), nameOf(isNotNull.field())); - // query directly on the field - if (isNotNull.field() instanceof NamedExpression) { - query = wrapIfNested(query, isNotNull.field()); - } + query = handleQuery(isNotNull, isNotNull.field(), + () -> new ExistsQuery(isNotNull.location(), nameOf(isNotNull.field()))); } return new QueryTranslation(query, aggFilter); @@ -529,11 +523,8 @@ final class QueryTranslator { if (onAggs) { aggFilter = new AggFilter(isNull.id().toString(), isNull.asScript()); } else { - query = new NotQuery(isNull.location(), new ExistsQuery(isNull.location(), nameOf(isNull.field()))); - // query directly on the field - if (isNull.field() instanceof NamedExpression) { - query = wrapIfNested(query, isNull.field()); - } + query = handleQuery(isNull, isNull.field(), + () -> new NotQuery(isNull.location(), new ExistsQuery(isNull.location(), nameOf(isNull.field())))); } return new QueryTranslation(query, aggFilter); @@ -564,12 +555,7 @@ final class QueryTranslator { aggFilter = new AggFilter(at.id().toString(), bc.asScript()); } else { - // query directly on the field - if (at instanceof FieldAttribute) { - query = wrapIfNested(translateQuery(bc), ne); - } else { - query = new ScriptQuery(at.location(), bc.asScript()); - } + query = handleQuery(bc, ne, () -> 
translateQuery(bc)); } return new QueryTranslation(query, aggFilter); } @@ -646,17 +632,11 @@ final class QueryTranslator { // // Agg context means HAVING -> PipelineAggs // - ScriptTemplate script = in.asScript(); if (onAggs) { - aggFilter = new AggFilter(at.id().toString(), script); + aggFilter = new AggFilter(at.id().toString(), in.asScript()); } else { - // query directly on the field - if (at instanceof FieldAttribute) { - query = wrapIfNested(new TermsQuery(in.location(), ne.name(), in.list()), ne); - } else { - query = new ScriptQuery(at.location(), script); - } + query = handleQuery(in, ne, () -> new TermsQuery(in.location(), ne.name(), in.list())); } return new QueryTranslation(query, aggFilter); } @@ -687,16 +667,9 @@ final class QueryTranslator { if (onAggs) { aggFilter = new AggFilter(at.id().toString(), r.asScript()); } else { - // typical range; no scripting involved - if (at instanceof FieldAttribute) { - RangeQuery rangeQuery = new RangeQuery(r.location(), nameOf(r.value()), valueOf(r.lower()), r.includeLower(), - valueOf(r.upper()), r.includeUpper(), dateFormat(r.value())); - query = wrapIfNested(rangeQuery, r.value()); - } - // scripted query - else { - query = new ScriptQuery(at.location(), r.asScript()); - } + query = handleQuery(r, r.value(), + () -> new RangeQuery(r.location(), nameOf(r.value()), valueOf(r.lower()), r.includeLower(), + valueOf(r.upper()), r.includeUpper(), dateFormat(r.value()))); } return new QueryTranslation(query, aggFilter); } else { @@ -845,6 +818,14 @@ final class QueryTranslator { protected abstract QueryTranslation asQuery(E e, boolean onAggs); + + protected static Query handleQuery(ScalarFunction sf, Expression field, Supplier query) { + if (field instanceof FieldAttribute) { + return wrapIfNested(query.get(), field); + } + return new ScriptQuery(sf.location(), sf.asScript()); + } + protected static Query wrapIfNested(Query query, Expression exp) { if (exp instanceof FieldAttribute) { FieldAttribute fa = (FieldAttribute) exp; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java index 92f953cb147..e2c42874696 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/planner/QueryTranslatorTests.java @@ -163,6 +163,22 @@ public class QueryTranslatorTests extends ESTestCase { assertEquals("Scalar function (LTRIM(keyword)) not allowed (yet) as arguments for LIKE", ex.getMessage()); } + public void testTranslateNotExpression_WhereClause_Painless() { + LogicalPlan p = plan("SELECT * FROM test WHERE NOT(POSITION('x', keyword) = 0)"); + assertTrue(p instanceof Project); + assertTrue(p.children().get(0) instanceof Filter); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertTrue(translation.query instanceof ScriptQuery); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.not(" + + "InternalSqlScriptUtils.eq(InternalSqlScriptUtils.position(" + + "params.v0,InternalSqlScriptUtils.docValue(doc,params.v1)),params.v2)))", + sc.script().toString()); + assertEquals("[{v=x}, {v=keyword}, {v=0}]", sc.script().params().toString()); + } + public void 
testTranslateIsNullExpression_WhereClause() { LogicalPlan p = plan("SELECT * FROM test WHERE keyword IS NULL"); assertTrue(p instanceof Project); @@ -178,6 +194,21 @@ public class QueryTranslatorTests extends ESTestCase { eq.asBuilder().toString().replaceAll("\\s+", "")); } + public void testTranslateIsNullExpression_WhereClause_Painless() { + LogicalPlan p = plan("SELECT * FROM test WHERE POSITION('x', keyword) IS NULL"); + assertTrue(p instanceof Project); + assertTrue(p.children().get(0) instanceof Filter); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertTrue(translation.query instanceof ScriptQuery); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.isNull(" + + "InternalSqlScriptUtils.position(params.v0,InternalSqlScriptUtils.docValue(doc,params.v1))))", + sc.script().toString()); + assertEquals("[{v=x}, {v=keyword}]", sc.script().params().toString()); + } + public void testTranslateIsNotNullExpression_WhereClause() { LogicalPlan p = plan("SELECT * FROM test WHERE keyword IS NOT NULL"); assertTrue(p instanceof Project); @@ -191,6 +222,21 @@ public class QueryTranslatorTests extends ESTestCase { eq.asBuilder().toString().replaceAll("\\s+", "")); } + public void testTranslateIsNotNullExpression_WhereClause_Painless() { + LogicalPlan p = plan("SELECT * FROM test WHERE POSITION('x', keyword) IS NOT NULL"); + assertTrue(p instanceof Project); + assertTrue(p.children().get(0) instanceof Filter); + Expression condition = ((Filter) p.children().get(0)).condition(); + assertFalse(condition.foldable()); + QueryTranslation translation = QueryTranslator.toQuery(condition, false); + assertTrue(translation.query instanceof ScriptQuery); + ScriptQuery sc = (ScriptQuery) translation.query; + assertEquals("InternalSqlScriptUtils.nullSafeFilter(InternalSqlScriptUtils.isNotNull(" + + "InternalSqlScriptUtils.position(params.v0,InternalSqlScriptUtils.docValue(doc,params.v1))))", + sc.script().toString()); + assertEquals("[{v=x}, {v=keyword}]", sc.script().params().toString()); + } + public void testTranslateIsNullExpression_HavingClause_Painless() { LogicalPlan p = plan("SELECT keyword, max(int) FROM test GROUP BY keyword HAVING max(int) IS NULL"); assertTrue(p instanceof Project);
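For the QueryTranslator changes above: the repeated "plain field vs. scripted" branches collapse into a single handleQuery helper that takes a Supplier, so the native query (terms, range, exists, not) is only constructed when the target really is a field attribute, and every other expression falls back to a Painless script query, which is exactly what the new _Painless tests assert. A minimal, self-contained sketch of that shape, using stand-in types rather than the real ES SQL classes:

    import java.util.function.Supplier;

    public class HandleQueryDemo {
        interface Query {}
        static class TermsQuery implements Query {}
        static class ScriptQuery implements Query {}

        // Mirrors the refactor's shape: build the native query lazily for plain
        // fields, otherwise fall back to a script query over the compiled Painless.
        static Query handleQuery(boolean isFieldAttribute, Supplier<Query> nativeQuery) {
            if (isFieldAttribute) {
                return nativeQuery.get();
            }
            return new ScriptQuery();
        }

        public static void main(String[] args) {
            System.out.println(handleQuery(true, TermsQuery::new).getClass().getSimpleName());  // TermsQuery
            System.out.println(handleQuery(false, TermsQuery::new).getClass().getSimpleName()); // ScriptQuery
        }
    }

Deferring construction behind the Supplier matters because building the native query for a scripted target (for example, POSITION('x', keyword) = 0) would be wasted work at best and incorrect at worst; the supplier is simply never invoked on that path.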