Merge remote-tracking branch 'origin/master' into index-lifecycle
commit 90c55f5e36
@@ -4,7 +4,7 @@
 # build and test Elasticsearch for this branch. Valid Java versions
 # are 'java' or 'openjdk' followed by the major release number.
 
-ES_BUILD_JAVA=java10
+ES_BUILD_JAVA=java11
 ES_RUNTIME_JAVA=java8
 GRADLE_TASK=build
 GRADLE_EXTRA_ARGS=-Dtests.bwc.refspec=elastic/index-lifecycle-6.x
@@ -6,5 +6,4 @@
 # or 'openjdk' followed by the major release number.
 
 ES_BUILD_JAVA:
-  - java10
   - java11
@@ -8,5 +8,4 @@
 ES_RUNTIME_JAVA:
   - java8
   - java8fips
-  - java10
   - java11
@@ -1 +1 @@
-1.10
+1.11
@@ -538,8 +538,20 @@ final class RequestConverters {
         return request;
     }
 
-    static Request rethrottle(RethrottleRequest rethrottleRequest) throws IOException {
-        String endpoint = new EndpointBuilder().addPathPart("_reindex").addPathPart(rethrottleRequest.getTaskId().toString())
+    static Request rethrottleReindex(RethrottleRequest rethrottleRequest) {
+        return rethrottle(rethrottleRequest, "_reindex");
+    }
+
+    static Request rethrottleUpdateByQuery(RethrottleRequest rethrottleRequest) {
+        return rethrottle(rethrottleRequest, "_update_by_query");
+    }
+
+    static Request rethrottleDeleteByQuery(RethrottleRequest rethrottleRequest) {
+        return rethrottle(rethrottleRequest, "_delete_by_query");
+    }
+
+    private static Request rethrottle(RethrottleRequest rethrottleRequest, String firstPathPart) {
+        String endpoint = new EndpointBuilder().addPathPart(firstPathPart).addPathPart(rethrottleRequest.getTaskId().toString())
             .addPathPart("_rethrottle").build();
         Request request = new Request(HttpPost.METHOD_NAME, endpoint);
         Params params = new Params(request)
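All three public converters above funnel into one private helper, so every rethrottle call produces a URL of the shape /{action}/{taskId}/_rethrottle. A minimal standalone sketch of that path layout (plain string joining only; the client's real EndpointBuilder also handles URL encoding):

// Sketch of the endpoint shape produced by the rethrottle converters above.
// Standalone illustration only; the client uses RequestConverters.EndpointBuilder.
public final class RethrottleEndpointSketch {

    static String rethrottleEndpoint(String firstPathPart, String taskId) {
        return "/" + firstPathPart + "/" + taskId + "/_rethrottle";
    }

    public static void main(String[] args) {
        String taskId = "oTUltX4IQMOUUVeiohTt8A:124"; // placeholder task id, as in the docs test
        System.out.println(rethrottleEndpoint("_reindex", taskId));         // /_reindex/.../_rethrottle
        System.out.println(rethrottleEndpoint("_update_by_query", taskId)); // /_update_by_query/.../_rethrottle
        System.out.println(rethrottleEndpoint("_delete_by_query", taskId)); // /_delete_by_query/.../_rethrottle
    }
}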
@@ -528,6 +528,62 @@ public class RestHighLevelClient implements Closeable {
         );
     }
 
+    /**
+     * Executes a delete by query rethrottle request.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html">
+     * Delete By Query API on elastic.co</a>
+     * @param rethrottleRequest the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public final ListTasksResponse deleteByQueryRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException {
+        return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleDeleteByQuery, options,
+            ListTasksResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Asynchronously execute a delete by query rethrottle request.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html">
+     * Delete By Query API on elastic.co</a>
+     * @param rethrottleRequest the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public final void deleteByQueryRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options,
+                                                   ActionListener<ListTasksResponse> listener) {
+        performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottleDeleteByQuery, options,
+            ListTasksResponse::fromXContent, listener, emptySet());
+    }
+
+    /**
+     * Executes an update by query rethrottle request.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html">
+     * Update By Query API on elastic.co</a>
+     * @param rethrottleRequest the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public final ListTasksResponse updateByQueryRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException {
+        return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleUpdateByQuery, options,
+            ListTasksResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Asynchronously execute an update by query rethrottle request.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html">
+     * Update By Query API on elastic.co</a>
+     * @param rethrottleRequest the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public final void updateByQueryRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options,
+                                                   ActionListener<ListTasksResponse> listener) {
+        performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottleUpdateByQuery, options,
+            ListTasksResponse::fromXContent, listener, emptySet());
+    }
+
     /**
      * Executes a reindex rethrottling request.
      * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html#docs-reindex-rethrottle">
@@ -539,8 +595,8 @@ public class RestHighLevelClient implements Closeable {
      * @throws IOException in case there is a problem sending the request or parsing back the response
      */
     public final ListTasksResponse reindexRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException {
-        return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottle, options, ListTasksResponse::fromXContent,
-            emptySet());
+        return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleReindex, options,
+            ListTasksResponse::fromXContent, emptySet());
     }
 
     /**
@@ -553,9 +609,9 @@ public class RestHighLevelClient implements Closeable {
      * @param listener the listener to be notified upon request completion
      */
     public final void reindexRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options,
-                                             ActionListener<ListTasksResponse> listener) {
-        performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottle, options, ListTasksResponse::fromXContent,
-            listener, emptySet());
+                                             ActionListener<ListTasksResponse> listener) {
+        performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottleReindex, options, ListTasksResponse::fromXContent,
+            listener, emptySet());
     }
 
     /**
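The new update-by-query and delete-by-query entry points mirror the existing reindex pair. A hedged usage sketch (assumes an already-built RestHighLevelClient and a known task id; all values are placeholders):

import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.RethrottleRequest;
import org.elasticsearch.tasks.TaskId;

import java.io.IOException;

public class RethrottleUsageSketch {

    // Raises the throttle of a running update-by-query task to 50 requests per second.
    static ListTasksResponse speedUp(RestHighLevelClient client, TaskId taskId) throws IOException {
        RethrottleRequest request = new RethrottleRequest(taskId, 50.0f);
        return client.updateByQueryRethrottle(request, RequestOptions.DEFAULT);
    }

    // Disables throttling for a running delete-by-query task entirely.
    static ListTasksResponse disableThrottling(RestHighLevelClient client, TaskId taskId) throws IOException {
        return client.deleteByQueryRethrottle(new RethrottleRequest(taskId), RequestOptions.DEFAULT);
    }
}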
@@ -19,6 +19,8 @@
 package org.elasticsearch.client;
 
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.client.watcher.AckWatchRequest;
+import org.elasticsearch.client.watcher.AckWatchResponse;
 import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
@@ -91,4 +93,32 @@ public final class WatcherClient {
         restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::deleteWatch, options,
             DeleteWatchResponse::fromXContent, listener, singleton(404));
     }
+
+    /**
+     * Acknowledges a watch.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html">
+     * the docs</a> for more information.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException if there is a problem sending the request or parsing back the response
+     */
+    public AckWatchResponse ackWatch(AckWatchRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, WatcherRequestConverters::ackWatch, options,
+            AckWatchResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Asynchronously acknowledges a watch.
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-ack-watch.html">
+     * the docs</a> for more information.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon completion of the request
+     */
+    public void ackWatchAsync(AckWatchRequest request, RequestOptions options, ActionListener<AckWatchResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, WatcherRequestConverters::ackWatch, options,
+            AckWatchResponse::fromXContent, listener, emptySet());
+    }
+
 }
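A short, hedged usage sketch of the new ack entry point (watch and action ids are placeholders; assumes the watch exists and has executed at least once):

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.watcher.AckWatchRequest;
import org.elasticsearch.client.watcher.AckWatchResponse;
import org.elasticsearch.client.watcher.ActionStatus.AckStatus;

import java.io.IOException;

public class AckWatchUsageSketch {

    // Acks a single action of a watch and returns its resulting ack state.
    static AckStatus.State ackAction(RestHighLevelClient client) throws IOException {
        AckWatchRequest request = new AckWatchRequest("my_watch_id", "logme"); // placeholder ids
        AckWatchResponse response = client.watcher().ackWatch(request, RequestOptions.DEFAULT);
        return response.getStatus().actionStatus("logme").ackStatus().state();
    }
}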
@@ -23,6 +23,7 @@ import org.apache.http.client.methods.HttpDelete;
 import org.apache.http.client.methods.HttpPut;
 import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.entity.ContentType;
+import org.elasticsearch.client.watcher.AckWatchRequest;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
@@ -59,4 +60,17 @@ public class WatcherRequestConverters {
         Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
         return request;
     }
+
+    public static Request ackWatch(AckWatchRequest ackWatchRequest) {
+        String endpoint = new RequestConverters.EndpointBuilder()
+            .addPathPartAsIs("_xpack")
+            .addPathPartAsIs("watcher")
+            .addPathPartAsIs("watch")
+            .addPathPart(ackWatchRequest.getWatchId())
+            .addPathPartAsIs("_ack")
+            .addCommaSeparatedPathParts(ackWatchRequest.getActionIds())
+            .build();
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
+        return request;
+    }
 }
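The converter above yields a PUT to /_xpack/watcher/watch/{watchId}/_ack, with an optional comma-separated action-id path part. A standalone sketch of that layout (no URL encoding, unlike the real EndpointBuilder):

import java.util.StringJoiner;

public final class AckWatchEndpointSketch {

    // Mirrors the path layout built by WatcherRequestConverters.ackWatch above.
    static String ackWatchEndpoint(String watchId, String... actionIds) {
        StringJoiner endpoint = new StringJoiner("/", "/", "")
            .add("_xpack").add("watcher").add("watch").add(watchId).add("_ack");
        if (actionIds != null && actionIds.length > 0) {
            endpoint.add(String.join(",", actionIds));
        }
        return endpoint.toString();
    }

    public static void main(String[] args) {
        // Prints: /_xpack/watcher/watch/my_watch/_ack/logme,emailme
        System.out.println(ackWatchEndpoint("my_watch", "logme", "emailme"));
    }
}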
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.watcher;
+
+import org.elasticsearch.client.Validatable;
+import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
+
+import java.util.Locale;
+
+/**
+ * A request to explicitly acknowledge a watch.
+ */
+public class AckWatchRequest implements Validatable {
+
+    private final String watchId;
+    private final String[] actionIds;
+
+    public AckWatchRequest(String watchId, String... actionIds) {
+        validateIds(watchId, actionIds);
+        this.watchId = watchId;
+        this.actionIds = actionIds;
+    }
+
+    private void validateIds(String watchId, String... actionIds) {
+        ValidationException exception = new ValidationException();
+        if (watchId == null) {
+            exception.addValidationError("watch id is missing");
+        } else if (PutWatchRequest.isValidId(watchId) == false) {
+            exception.addValidationError("watch id contains whitespace");
+        }
+
+        if (actionIds != null) {
+            for (String actionId : actionIds) {
+                if (actionId == null) {
+                    exception.addValidationError(String.format(Locale.ROOT, "action id may not be null"));
+                } else if (PutWatchRequest.isValidId(actionId) == false) {
+                    exception.addValidationError(
+                        String.format(Locale.ROOT, "action id [%s] contains whitespace", actionId));
+                }
+            }
+        }
+
+        if (!exception.validationErrors().isEmpty()) {
+            throw exception;
+        }
+    }
+
+    /**
+     * @return The ID of the watch to be acked.
+     */
+    public String getWatchId() {
+        return watchId;
+    }
+
+    /**
+     * @return The IDs of the actions to be acked. If omitted,
+     * all actions for the given watch will be acknowledged.
+     */
+    public String[] getActionIds() {
+        return actionIds;
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder sb = new StringBuilder("ack [").append(watchId).append("]");
+        if (actionIds.length > 0) {
+            sb.append("[");
+            for (int i = 0; i < actionIds.length; i++) {
+                if (i > 0) {
+                    sb.append(", ");
+                }
+                sb.append(actionIds[i]);
+            }
+            sb.append("]");
+        }
+        return sb.toString();
+    }
+}
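Because validateIds runs in the constructor, invalid ids fail fast rather than at request time. A small sketch of that behavior:

import org.elasticsearch.client.ValidationException;
import org.elasticsearch.client.watcher.AckWatchRequest;

public class AckWatchRequestValidationSketch {

    public static void main(String[] args) {
        try {
            new AckWatchRequest("id with whitespace"); // constructor validates eagerly
        } catch (ValidationException e) {
            // prints: [watch id contains whitespace]
            System.out.println(e.validationErrors());
        }
    }
}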
@@ -0,0 +1,61 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.watcher;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+/**
+ * The response from an 'ack watch' request.
+ */
+public class AckWatchResponse {
+
+    private final WatchStatus status;
+
+    public AckWatchResponse(WatchStatus status) {
+        this.status = status;
+    }
+
+    /**
+     * @return the status of the requested watch. If an action was
+     * successfully acknowledged, this will be reflected in its status.
+     */
+    public WatchStatus getStatus() {
+        return status;
+    }
+
+    private static final ParseField STATUS_FIELD = new ParseField("status");
+    private static ConstructingObjectParser<AckWatchResponse, Void> PARSER =
+        new ConstructingObjectParser<>("ack_watch_response", true,
+            a -> new AckWatchResponse((WatchStatus) a[0]));
+
+    static {
+        PARSER.declareObject(ConstructingObjectParser.constructorArg(),
+            (parser, context) -> WatchStatus.parse(parser),
+            STATUS_FIELD);
+    }
+
+    public static AckWatchResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+}
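The parser is constructed with its lenient flag set to true, so unknown keys in the response body are skipped. A hedged sketch of parsing a raw JSON body into the response, mirroring the parser setup used in the tests later in this commit:

import org.elasticsearch.client.watcher.AckWatchResponse;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

import java.io.IOException;

public class AckWatchResponseParsingSketch {

    public static void main(String[] args) throws IOException {
        // Minimal body; a real response carries a full watch status object.
        String json = "{\"status\":{\"version\":42,\"execution_state\":\"acknowledged\"}}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, null, json)) {
            parser.nextToken(); // advance to the first token, as the tests do
            AckWatchResponse response = AckWatchResponse.fromXContent(parser);
            System.out.println(response.getStatus().version()); // 42
        }
    }
}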
@@ -55,9 +55,11 @@ import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.query.IdsQueryBuilder;
 import org.elasticsearch.index.reindex.BulkByScrollResponse;
+import org.elasticsearch.index.reindex.DeleteByQueryAction;
 import org.elasticsearch.index.reindex.DeleteByQueryRequest;
 import org.elasticsearch.index.reindex.ReindexAction;
 import org.elasticsearch.index.reindex.ReindexRequest;
+import org.elasticsearch.index.reindex.UpdateByQueryAction;
 import org.elasticsearch.index.reindex.UpdateByQueryRequest;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.script.Script;
@@ -727,10 +729,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
             }
         });
 
-        TaskGroup taskGroupToRethrottle = findTaskToRethrottle();
-        assertThat(taskGroupToRethrottle.getChildTasks(), empty());
-        TaskId taskIdToRethrottle = taskGroupToRethrottle.getTaskInfo().getTaskId();
-
+        TaskId taskIdToRethrottle = findTaskToRethrottle(ReindexAction.NAME);
         float requestsPerSecond = 1000f;
         ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond),
             highLevelClient()::reindexRethrottle, highLevelClient()::reindexRethrottleAsync);
@@ -752,10 +751,10 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
         }
     }
 
-    private TaskGroup findTaskToRethrottle() throws IOException {
+    private TaskId findTaskToRethrottle(String actionName) throws IOException {
         long start = System.nanoTime();
         ListTasksRequest request = new ListTasksRequest();
-        request.setActions(ReindexAction.NAME);
+        request.setActions(actionName);
         request.setDetailed(true);
         do {
             ListTasksResponse list = highLevelClient().tasks().list(request, RequestOptions.DEFAULT);
@@ -766,13 +765,15 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
                 // The parent task hasn't started yet
                 continue;
             }
-            return list.getTaskGroups().get(0);
+            TaskGroup taskGroup = list.getTaskGroups().get(0);
+            assertThat(taskGroup.getChildTasks(), empty());
+            return taskGroup.getTaskInfo().getTaskId();
         } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10));
         throw new AssertionError("Couldn't find tasks to rethrottle. Here are the running tasks " +
             highLevelClient().tasks().list(request, RequestOptions.DEFAULT));
     }
 
-    public void testUpdateByQuery() throws IOException {
+    public void testUpdateByQuery() throws Exception {
         final String sourceIndex = "source1";
         {
             // Prepare
@@ -836,9 +837,53 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
                     .getSourceAsMap().get("foo"))
             );
         }
+        {
+            // test update-by-query rethrottling
+            UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest();
+            updateByQueryRequest.indices(sourceIndex);
+            updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1").types("type"));
+            updateByQueryRequest.setRefresh(true);
+
+            // the following settings are supposed to halt reindexing after the first document
+            updateByQueryRequest.setBatchSize(1);
+            updateByQueryRequest.setRequestsPerSecond(0.00001f);
+            final CountDownLatch taskFinished = new CountDownLatch(1);
+            highLevelClient().updateByQueryAsync(updateByQueryRequest, RequestOptions.DEFAULT, new ActionListener<BulkByScrollResponse>() {
+
+                @Override
+                public void onResponse(BulkByScrollResponse response) {
+                    taskFinished.countDown();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    fail(e.toString());
+                }
+            });
+
+            TaskId taskIdToRethrottle = findTaskToRethrottle(UpdateByQueryAction.NAME);
+            float requestsPerSecond = 1000f;
+            ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond),
+                highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync);
+            assertThat(response.getTasks(), hasSize(1));
+            assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId());
+            assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class));
+            assertEquals(Float.toString(requestsPerSecond),
+                ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString());
+            taskFinished.await(2, TimeUnit.SECONDS);
+
+            // any rethrottling with the same taskId after the update-by-query has completed should result in a failure
+            response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond),
+                highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync);
+            assertTrue(response.getTasks().isEmpty());
+            assertFalse(response.getNodeFailures().isEmpty());
+            assertEquals(1, response.getNodeFailures().size());
+            assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]",
+                response.getNodeFailures().get(0).getCause().getMessage());
+        }
     }
 
-    public void testDeleteByQuery() throws IOException {
+    public void testDeleteByQuery() throws Exception {
         final String sourceIndex = "source1";
         {
             // Prepare
@@ -855,6 +900,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
                         .source(Collections.singletonMap("foo", 1), XContentType.JSON))
                     .add(new IndexRequest(sourceIndex, "type", "2")
                         .source(Collections.singletonMap("foo", 2), XContentType.JSON))
+                    .add(new IndexRequest(sourceIndex, "type", "3")
+                        .source(Collections.singletonMap("foo", 3), XContentType.JSON))
                     .setRefreshPolicy(RefreshPolicy.IMMEDIATE),
                 RequestOptions.DEFAULT
             ).status()
@@ -878,10 +925,54 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
             assertEquals(0, bulkResponse.getBulkFailures().size());
             assertEquals(0, bulkResponse.getSearchFailures().size());
             assertEquals(
-                1,
+                2,
                 highLevelClient().search(new SearchRequest(sourceIndex), RequestOptions.DEFAULT).getHits().totalHits
             );
         }
+        {
+            // test delete-by-query rethrottling
+            DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest();
+            deleteByQueryRequest.indices(sourceIndex);
+            deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("2", "3").types("type"));
+            deleteByQueryRequest.setRefresh(true);
+
+            // the following settings are supposed to halt reindexing after the first document
+            deleteByQueryRequest.setBatchSize(1);
+            deleteByQueryRequest.setRequestsPerSecond(0.00001f);
+            final CountDownLatch taskFinished = new CountDownLatch(1);
+            highLevelClient().deleteByQueryAsync(deleteByQueryRequest, RequestOptions.DEFAULT, new ActionListener<BulkByScrollResponse>() {
+
+                @Override
+                public void onResponse(BulkByScrollResponse response) {
+                    taskFinished.countDown();
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    fail(e.toString());
+                }
+            });
+
+            TaskId taskIdToRethrottle = findTaskToRethrottle(DeleteByQueryAction.NAME);
+            float requestsPerSecond = 1000f;
+            ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond),
+                highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync);
+            assertThat(response.getTasks(), hasSize(1));
+            assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId());
+            assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class));
+            assertEquals(Float.toString(requestsPerSecond),
+                ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString());
+            taskFinished.await(2, TimeUnit.SECONDS);
+
+            // any rethrottling with the same taskId after the delete-by-query has completed should result in a failure
+            response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond),
+                highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync);
+            assertTrue(response.getTasks().isEmpty());
+            assertFalse(response.getNodeFailures().isEmpty());
+            assertEquals(1, response.getNodeFailures().size());
+            assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]",
+                response.getNodeFailures().get(0).getCause().getMessage());
+        }
     }
 
     public void testBulkProcessorIntegration() throws IOException {
@@ -64,6 +64,7 @@ import org.elasticsearch.common.CheckedBiConsumer;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.unit.TimeValue;
@@ -476,7 +477,7 @@ public class RequestConvertersTests extends ESTestCase {
         assertToXContentBody(deleteByQueryRequest, request.getEntity());
     }
 
-    public void testRethrottle() throws IOException {
+    public void testRethrottle() {
         TaskId taskId = new TaskId(randomAlphaOfLength(10), randomIntBetween(1, 100));
         RethrottleRequest rethrottleRequest;
         Float requestsPerSecond;
@@ -490,11 +491,20 @@ public class RequestConvertersTests extends ESTestCase {
             expectedParams.put(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, "-1");
         }
         expectedParams.put("group_by", "none");
-        Request request = RequestConverters.rethrottle(rethrottleRequest);
-        assertEquals("/_reindex/" + taskId + "/_rethrottle", request.getEndpoint());
-        assertEquals(HttpPost.METHOD_NAME, request.getMethod());
-        assertEquals(expectedParams, request.getParameters());
-        assertNull(request.getEntity());
+        List<Tuple<String, Supplier<Request>>> variants = new ArrayList<>();
+        variants.add(new Tuple<String, Supplier<Request>>("_reindex", () -> RequestConverters.rethrottleReindex(rethrottleRequest)));
+        variants.add(new Tuple<String, Supplier<Request>>("_update_by_query",
+            () -> RequestConverters.rethrottleUpdateByQuery(rethrottleRequest)));
+        variants.add(new Tuple<String, Supplier<Request>>("_delete_by_query",
+            () -> RequestConverters.rethrottleDeleteByQuery(rethrottleRequest)));
+
+        for (Tuple<String, Supplier<Request>> variant : variants) {
+            Request request = variant.v2().get();
+            assertEquals("/" + variant.v1() + "/" + taskId + "/_rethrottle", request.getEndpoint());
+            assertEquals(HttpPost.METHOD_NAME, request.getMethod());
+            assertEquals(expectedParams, request.getParameters());
+            assertNull(request.getEntity());
+        }
 
         // test illegal RethrottleRequest values
         Exception e = expectThrows(NullPointerException.class, () -> new RethrottleRequest(null, 1.0f));
@@ -18,6 +18,11 @@
  */
 package org.elasticsearch.client;
 
+import org.elasticsearch.ElasticsearchStatusException;
+import org.elasticsearch.client.watcher.AckWatchRequest;
+import org.elasticsearch.client.watcher.AckWatchResponse;
+import org.elasticsearch.client.watcher.ActionStatus;
+import org.elasticsearch.client.watcher.ActionStatus.AckStatus;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -25,6 +30,7 @@ import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
+import org.elasticsearch.rest.RestStatus;
 
 import static org.hamcrest.Matchers.is;
 
@@ -72,4 +78,34 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
         }
     }
 
+    public void testAckWatch() throws Exception {
+        String watchId = randomAlphaOfLength(10);
+        String actionId = "logme";
+
+        PutWatchResponse putWatchResponse = createWatch(watchId);
+        assertThat(putWatchResponse.isCreated(), is(true));
+
+        AckWatchResponse response = highLevelClient().watcher().ackWatch(
+            new AckWatchRequest(watchId, actionId), RequestOptions.DEFAULT);
+
+        ActionStatus actionStatus = response.getStatus().actionStatus(actionId);
+        assertEquals(AckStatus.State.AWAITS_SUCCESSFUL_EXECUTION, actionStatus.ackStatus().state());
+
+        // TODO: use the high-level REST client here once it supports 'execute watch'.
+        Request executeWatchRequest = new Request("POST", "_xpack/watcher/watch/" + watchId + "/_execute");
+        executeWatchRequest.setJsonEntity("{ \"record_execution\": true }");
+        Response executeResponse = client().performRequest(executeWatchRequest);
+        assertEquals(RestStatus.OK.getStatus(), executeResponse.getStatusLine().getStatusCode());
+
+        response = highLevelClient().watcher().ackWatch(
+            new AckWatchRequest(watchId, actionId), RequestOptions.DEFAULT);
+
+        actionStatus = response.getStatus().actionStatus(actionId);
+        assertEquals(AckStatus.State.ACKED, actionStatus.ackStatus().state());
+
+        ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
+            () -> highLevelClient().watcher().ackWatch(
+                new AckWatchRequest("nonexistent"), RequestOptions.DEFAULT));
+        assertEquals(RestStatus.NOT_FOUND, exception.status());
+    }
 }
@@ -21,6 +21,7 @@ package org.elasticsearch.client;
 
 import org.apache.http.client.methods.HttpDelete;
 import org.apache.http.client.methods.HttpPut;
+import org.elasticsearch.client.watcher.AckWatchRequest;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
@@ -30,6 +31,7 @@ import org.elasticsearch.test.ESTestCase;
 import java.io.ByteArrayOutputStream;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.StringJoiner;
 
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
@@ -75,4 +77,24 @@ public class WatcherRequestConvertersTests extends ESTestCase {
         assertEquals("/_xpack/watcher/watch/" + watchId, request.getEndpoint());
         assertThat(request.getEntity(), nullValue());
     }
+
+    public void testAckWatch() {
+        String watchId = randomAlphaOfLength(10);
+        String[] actionIds = generateRandomStringArray(5, 10, false, true);
+
+        AckWatchRequest ackWatchRequest = new AckWatchRequest(watchId, actionIds);
+        Request request = WatcherRequestConverters.ackWatch(ackWatchRequest);
+
+        assertEquals(HttpPut.METHOD_NAME, request.getMethod());
+
+        StringJoiner expectedEndpoint = new StringJoiner("/", "/", "")
+            .add("_xpack").add("watcher").add("watch").add(watchId).add("_ack");
+        if (ackWatchRequest.getActionIds().length > 0) {
+            String actionsParam = String.join(",", ackWatchRequest.getActionIds());
+            expectedEndpoint.add(actionsParam);
+        }
+
+        assertEquals(expectedEndpoint.toString(), request.getEndpoint());
+        assertThat(request.getEntity(), nullValue());
+    }
 }
@@ -902,19 +902,26 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
         TaskId taskId = new TaskId("oTUltX4IQMOUUVeiohTt8A:124");
         {
             // tag::rethrottle-disable-request
-            RethrottleRequest rethrottleRequest = new RethrottleRequest(taskId); // <1>
-            client.reindexRethrottle(rethrottleRequest, RequestOptions.DEFAULT);
+            RethrottleRequest request = new RethrottleRequest(taskId); // <1>
             // end::rethrottle-disable-request
         }
 
         {
             // tag::rethrottle-request
-            RethrottleRequest rethrottleRequest = new RethrottleRequest(taskId, 100.0f); // <1>
-            client.reindexRethrottle(rethrottleRequest, RequestOptions.DEFAULT);
+            RethrottleRequest request = new RethrottleRequest(taskId, 100.0f); // <1>
             // end::rethrottle-request
         }
 
-        // tag::rethrottle-request-async
+        {
+            RethrottleRequest request = new RethrottleRequest(taskId);
+            // tag::rethrottle-request-execution
+            client.reindexRethrottle(request, RequestOptions.DEFAULT); // <1>
+            client.updateByQueryRethrottle(request, RequestOptions.DEFAULT); // <2>
+            client.deleteByQueryRethrottle(request, RequestOptions.DEFAULT); // <3>
+            // end::rethrottle-request-execution
+        }
+
+        // tag::rethrottle-request-async-listener
         ActionListener<ListTasksResponse> listener = new ActionListener<ListTasksResponse>() {
             @Override
             public void onResponse(ListTasksResponse response) {
@@ -926,15 +933,17 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
                 // <2>
             }
         };
-        // end::rethrottle-request-async
+        // end::rethrottle-request-async-listener
 
         // Replace the empty listener by a blocking listener in test
-        final CountDownLatch latch = new CountDownLatch(1);
+        final CountDownLatch latch = new CountDownLatch(3);
         listener = new LatchedActionListener<>(listener, latch);
 
-        RethrottleRequest rethrottleRequest = new RethrottleRequest(taskId);
+        RethrottleRequest request = new RethrottleRequest(taskId);
         // tag::rethrottle-execute-async
-        client.reindexRethrottleAsync(rethrottleRequest, RequestOptions.DEFAULT, listener); // <1>
+        client.reindexRethrottleAsync(request, RequestOptions.DEFAULT, listener); // <1>
+        client.updateByQueryRethrottleAsync(request, RequestOptions.DEFAULT, listener); // <2>
+        client.deleteByQueryRethrottleAsync(request, RequestOptions.DEFAULT, listener); // <3>
        // end::rethrottle-execute-async
        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }
@@ -21,8 +21,15 @@ package org.elasticsearch.client.documentation;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.watcher.AckWatchRequest;
+import org.elasticsearch.client.watcher.AckWatchResponse;
+import org.elasticsearch.client.watcher.ActionStatus;
+import org.elasticsearch.client.watcher.ActionStatus.AckStatus;
+import org.elasticsearch.client.watcher.WatchStatus;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -30,6 +37,7 @@ import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
+import org.elasticsearch.rest.RestStatus;
 
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -132,4 +140,67 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
         }
     }
 
+    public void testAckWatch() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+
+        {
+            BytesReference watch = new BytesArray("{ \n" +
+                "  \"trigger\": { \"schedule\": { \"interval\": \"10h\" } },\n" +
+                "  \"input\": { \"simple\": { \"foo\" : \"bar\" } },\n" +
+                "  \"actions\": { \"logme\": { \"logging\": { \"text\": \"{{ctx.payload}}\" } } }\n" +
+                "}");
+            PutWatchRequest putWatchRequest = new PutWatchRequest("my_watch_id", watch, XContentType.JSON);
+            client.watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
+
+            // TODO: use the high-level REST client here once it supports 'execute watch'.
+            Request executeWatchRequest = new Request("POST", "_xpack/watcher/watch/my_watch_id/_execute");
+            executeWatchRequest.setJsonEntity("{ \"record_execution\": true }");
+            Response executeResponse = client().performRequest(executeWatchRequest);
+            assertEquals(RestStatus.OK.getStatus(), executeResponse.getStatusLine().getStatusCode());
+        }
+
+        {
+            //tag::ack-watch-execute
+            AckWatchRequest request = new AckWatchRequest("my_watch_id", // <1>
+                "logme", "emailme"); // <2>
+            AckWatchResponse response = client.watcher().ackWatch(request, RequestOptions.DEFAULT);
+            //end::ack-watch-execute
+
+            //tag::ack-watch-response
+            WatchStatus watchStatus = response.getStatus();
+            ActionStatus actionStatus = watchStatus.actionStatus("logme"); // <1>
+            AckStatus.State ackState = actionStatus.ackStatus().state(); // <2>
+            //end::ack-watch-response
+
+            assertEquals(AckStatus.State.ACKED, ackState);
+        }
+
+        {
+            AckWatchRequest request = new AckWatchRequest("my_watch_id");
+            // tag::ack-watch-execute-listener
+            ActionListener<AckWatchResponse> listener = new ActionListener<AckWatchResponse>() {
+                @Override
+                public void onResponse(AckWatchResponse response) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            // end::ack-watch-execute-listener
+
+            // For testing, replace the empty listener by a blocking listener.
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::ack-watch-execute-async
+            client.watcher().ackWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::ack-watch-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
 }
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.watcher;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParseException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.XContentTestUtils;
+
+import java.io.IOException;
+import java.util.function.Predicate;
+
+/**
+ * Basic unit tests for {@link AckWatchResponse}.
+ *
+ * Note that we only sanity check watch status parsing here, as there
+ * are dedicated tests for it in {@link WatchStatusTests}.
+ */
+public class AckWatchResponseTests extends ESTestCase {
+
+    public void testBasicParsing() throws IOException {
+        XContentType contentType = randomFrom(XContentType.values());
+        XContentBuilder builder = XContentFactory.contentBuilder(contentType).startObject()
+            .startObject("status")
+                .field("version", 42)
+                .field("execution_state", ExecutionState.ACKNOWLEDGED)
+            .endObject()
+            .endObject();
+        BytesReference bytes = BytesReference.bytes(builder);
+
+        AckWatchResponse response = parse(builder.contentType(), bytes);
+        WatchStatus status = response.getStatus();
+        assertNotNull(status);
+        assertEquals(42, status.version());
+        assertEquals(ExecutionState.ACKNOWLEDGED, status.getExecutionState());
+    }
+
+    public void testParsingWithMissingStatus() throws IOException {
+        XContentType contentType = randomFrom(XContentType.values());
+        XContentBuilder builder = XContentFactory.contentBuilder(contentType).startObject().endObject();
+        BytesReference bytes = BytesReference.bytes(builder);
+
+        expectThrows(IllegalArgumentException.class, () -> parse(builder.contentType(), bytes));
+    }
+
+    public void testParsingWithNullStatus() throws IOException {
+        XContentType contentType = randomFrom(XContentType.values());
+        XContentBuilder builder = XContentFactory.contentBuilder(contentType).startObject()
+            .nullField("status")
+            .endObject();
+        BytesReference bytes = BytesReference.bytes(builder);
+
+        expectThrows(XContentParseException.class, () -> parse(builder.contentType(), bytes));
+    }
+
+    public void testParsingWithUnknownKeys() throws IOException {
+        XContentType contentType = randomFrom(XContentType.values());
+        XContentBuilder builder = XContentFactory.contentBuilder(contentType).startObject()
+            .startObject("status")
+                .field("version", 42)
+                .field("execution_state", ExecutionState.ACKNOWLEDGED)
+            .endObject()
+            .endObject();
+        BytesReference bytes = BytesReference.bytes(builder);
+
+        Predicate<String> excludeFilter = field -> field.equals("status.actions");
+        BytesReference bytesWithRandomFields = XContentTestUtils.insertRandomFields(
+            builder.contentType(), bytes, excludeFilter, random());
+
+        AckWatchResponse response = parse(builder.contentType(), bytesWithRandomFields);
+        WatchStatus status = response.getStatus();
+        assertNotNull(status);
+        assertEquals(42, status.version());
+        assertEquals(ExecutionState.ACKNOWLEDGED, status.getExecutionState());
+    }
+
+    private AckWatchResponse parse(XContentType contentType, BytesReference bytes) throws IOException {
+        XContentParser parser = XContentFactory.xContent(contentType)
+            .createParser(NamedXContentRegistry.EMPTY, null, bytes.streamInput());
+        parser.nextToken();
+        return AckWatchResponse.fromXContent(parser);
+    }
+}
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.watcher;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
+import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Optional;
+
+import static org.hamcrest.Matchers.hasItem;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class WatchRequestValidationTests extends ESTestCase {
+
+    public void testAcknowledgeWatchInvalidWatchId() {
+        ValidationException e = expectThrows(ValidationException.class,
+            () -> new AckWatchRequest("id with whitespaces"));
+        assertThat(e.validationErrors(), hasItem("watch id contains whitespace"));
+    }
+
+    public void testAcknowledgeWatchInvalidActionId() {
+        ValidationException e = expectThrows(ValidationException.class,
+            () -> new AckWatchRequest("_id", "action id with whitespaces"));
+        assertThat(e.validationErrors(), hasItem("action id [action id with whitespaces] contains whitespace"));
+    }
+
+    public void testAcknowledgeWatchNullActionArray() {
+        // need this to prevent some compilation errors, i.e. in 1.8.0_91
+        String[] nullArray = null;
+        Optional<ValidationException> e = new AckWatchRequest("_id", nullArray).validate();
+        assertFalse(e.isPresent());
+    }
+
+    public void testAcknowledgeWatchNullActionId() {
+        ValidationException e = expectThrows(ValidationException.class,
+            () -> new AckWatchRequest("_id", new String[] {null}));
+        assertThat(e.validationErrors(), hasItem("action id may not be null"));
+    }
+
+    public void testDeleteWatchInvalidWatchId() {
+        ActionRequestValidationException e = new DeleteWatchRequest("id with whitespaces").validate();
+        assertThat(e, is(notNullValue()));
+        assertThat(e.validationErrors(), hasItem("watch id contains whitespace"));
+    }
+
+    public void testDeleteWatchNullId() {
+        ActionRequestValidationException e = new DeleteWatchRequest(null).validate();
+        assertThat(e, is(notNullValue()));
+        assertThat(e.validationErrors(), hasItem("watch id is missing"));
+    }
+
+    public void testPutWatchInvalidWatchId() {
+        ActionRequestValidationException e = new PutWatchRequest("id with whitespaces", BytesArray.EMPTY, XContentType.JSON).validate();
+        assertThat(e, is(notNullValue()));
+        assertThat(e.validationErrors(), hasItem("watch id contains whitespace"));
+    }
+
+    public void testPutWatchNullId() {
+        ActionRequestValidationException e = new PutWatchRequest(null, BytesArray.EMPTY, XContentType.JSON).validate();
+        assertThat(e, is(notNullValue()));
+        assertThat(e.validationErrors(), hasItem("watch id is missing"));
+    }
+
+    public void testPutWatchSourceNull() {
+        ActionRequestValidationException e = new PutWatchRequest("foo", null, XContentType.JSON).validate();
+        assertThat(e, is(notNullValue()));
+        assertThat(e.validationErrors(), hasItem("watch source is missing"));
+    }
+
+    public void testPutWatchContentNull() {
+        ActionRequestValidationException e = new PutWatchRequest("foo", BytesArray.EMPTY, null).validate();
+        assertThat(e, is(notNullValue()));
+        assertThat(e.validationErrors(), hasItem("request body is missing"));
+    }
+}
@@ -157,8 +157,10 @@ subprojects {
       environment('JAVA_HOME', getJavaHome(it, 8))
     } else if ("6.2".equals(bwcBranch)) {
       environment('JAVA_HOME', getJavaHome(it, 9))
-    } else if (["6.3", "6.4", "6.x"].contains(bwcBranch)) {
+    } else if (["6.3", "6.4"].contains(bwcBranch)) {
       environment('JAVA_HOME', getJavaHome(it, 10))
+    } else if (["6.x"].contains(bwcBranch)) {
+      environment('JAVA_HOME', getJavaHome(it, 11))
     } else {
       environment('JAVA_HOME', project.compilerJavaHome)
     }
@@ -1,15 +1,15 @@
-[[java-rest-high-document-reindex-rethrottle]]
-=== Reindex Rethrottle API
+[[java-rest-high-document-rethrottle]]
+=== Rethrottle API
 
-[[java-rest-high-document-reindex-rethrottle-request]]
-==== Reindex Rethrolle Request
+[[java-rest-high-document-rethrottle-request]]
+==== Rethrottle Request
 
-A `RethrottleRequest` can be used to change existing throttling on a runnind
-reindex task or disable it entirely. It requires the task Id of the reindex
-task to change.
+A `RethrottleRequest` can be used to change the current throttling on a running
+reindex, update-by-query or delete-by-query task or to disable throttling of
+the task entirely. It requires the task Id of the task to change.
 
 In its simplest form, you can use it to disable throttling of a running
-reindex task using the following:
+task using the following:
 
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
@@ -26,7 +26,19 @@ include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request]
 --------------------------------------------------
 <1> Request to change the throttling of a task to 100 requests per second
 
-[[java-rest-high-document-reindex-rethrottle-async]]
+The rethrottling request can be executed by using one of the three appropriate
+methods depending on whether a reindex, update-by-query or delete-by-query task
+should be rethrottled:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request-execution]
+--------------------------------------------------
+<1> Execute reindex rethrottling request
+<2> The same for update-by-query
+<3> The same for delete-by-query
+
+[[java-rest-high-document-rethrottle-async]]
 ==== Asynchronous Execution
 
 The asynchronous execution of a rethrottle request requires both the `RethrottleRequest`
@@ -37,8 +49,9 @@ method:
 --------------------------------------------------
 include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-execute-async]
 --------------------------------------------------
-<1> The RethrottleRequest to execute and the ActionListener to use when the
-execution completes
+<1> Execute reindex rethrottling asynchronously
+<2> The same for update-by-query
+<3> The same for delete-by-query
 
 The asynchronous method does not block and returns immediately.
 Once it is completed the `ActionListener` is called back using the `onResponse` method
@@ -47,12 +60,12 @@ it failed. A typical listener looks like this:
 
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
-include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request-async]
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request-async-listener]
 --------------------------------------------------
 <1> Code executed when the request is successfully completed
 <2> Code executed when the request fails with an exception
 
-[[java-rest-high-document-reindex-retrottle-response]]
+[[java-rest-high-document-retrottle-response]]
 ==== Rethrottle Response
 
 Rethrottling returns the task that has been rethrottled in the form of a
@@ -21,7 +21,7 @@ Multi-document APIs::
 * <<{upid}-reindex>>
 * <<{upid}-update-by-query>>
 * <<{upid}-delete-by-query>>
-* <<{upid}-reindex-rethrottle>>
+* <<{upid}-rethrottle>>
 
 include::document/index.asciidoc[]
 include::document/get.asciidoc[]
@@ -33,7 +33,7 @@ include::document/multi-get.asciidoc[]
 include::document/reindex.asciidoc[]
 include::document/update-by-query.asciidoc[]
 include::document/delete-by-query.asciidoc[]
-include::document/reindex-rethrottle.asciidoc[]
+include::document/rethrottle.asciidoc[]
 
 == Search APIs
 
@@ -310,9 +310,11 @@ The Java High Level REST Client supports the following Watcher APIs:
 
 * <<java-rest-high-x-pack-watcher-put-watch>>
 * <<java-rest-high-x-pack-watcher-delete-watch>>
+* <<java-rest-high-watcher-ack-watch>>
 
 include::watcher/put-watch.asciidoc[]
 include::watcher/delete-watch.asciidoc[]
+include::watcher/ack-watch.asciidoc[]
 
 == Graph APIs
 
@@ -0,0 +1,57 @@
[[java-rest-high-watcher-ack-watch]]
=== Ack Watch API

[[java-rest-high-watcher-ack-watch-execution]]
==== Execution

{xpack-ref}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you
to manually throttle execution of a watch's actions. A watch can be acknowledged
through the following request:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[ack-watch-execute]
--------------------------------------------------
<1> The ID of the watch to ack.
<2> An optional list of IDs representing the watch actions that should be acked.
If no action IDs are provided, then all of the watch's actions will be acked.

[[java-rest-high-watcher-ack-watch-response]]
==== Response

The returned `AckWatchResponse` contains the new status of the requested watch:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[ack-watch-response]
--------------------------------------------------
<1> The status of a specific action that was acked.
<2> The acknowledgement state of the action. If the action was successfully
acked, this state will be equal to `AckStatus.State.ACKED`.

[[java-rest-high-watcher-ack-watch-async]]
==== Asynchronous Execution

This request can be executed asynchronously:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[ack-watch-execute-async]
--------------------------------------------------
<1> The `AckWatchRequest` to execute and the `ActionListener` to use when
the execution completes.

The asynchronous method does not block and returns immediately. Once the request
completes, the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.

A listener for `AckWatchResponse` can be constructed as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[ack-watch-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument.
<2> Called in case of failure. The raised exception is provided as an argument.
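
For orientation, the tagged snippets above boil down to roughly the following sketch. This is a hedged illustration rather than the documented snippet itself: it assumes an already-built `RestHighLevelClient` named `client`, and the watch id `my_watch_id` and action id `logme` are invented.

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.watcher.AckWatchRequest;
import org.elasticsearch.client.watcher.AckWatchResponse;
import org.elasticsearch.client.watcher.ActionStatus;

// Ack a single action; leaving the action ids out would ack all of the watch's actions.
AckWatchRequest request = new AckWatchRequest("my_watch_id", "logme");
AckWatchResponse response = client.watcher().ackWatch(request, RequestOptions.DEFAULT);

// The response carries the new watch status; a successfully acked action reports ACKED.
ActionStatus actionStatus = response.getStatus().actionStatus("logme");
ActionStatus.AckStatus.State state = actionStatus.ackStatus().state();
--------------------------------------------------
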
@@ -26,12 +26,17 @@ The following gce settings (prefixed with `cloud.gce`) are supported:

`project_id`::

-    Your Google project id (mandatory).
+    Your Google project id.
+    By default the project id will be derived from the instance metadata.
+
+    Note: Deriving the project id from system properties or environment variables
+    (`GOOGLE_CLOUD_PROJECT` or `GCLOUD_PROJECT`) is not supported.

`zone`::

-    helps to retrieve instances running in a given zone (mandatory). It should be one of the
-    https://developers.google.com/compute/docs/zones#available[GCE supported zones].
+    helps to retrieve instances running in a given zone.
+    It should be one of the https://developers.google.com/compute/docs/zones#available[GCE supported zones].
+    By default the zone will be derived from the instance metadata.
+
+    See also <<discovery-gce-usage-zones>>.

`retry`::
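
Under the hood, this derivation is a plain HTTP GET against the instance metadata server with the mandatory `Metadata-Flavor: Google` header. A minimal illustrative sketch, assuming `java.net.HttpURLConnection`, Java 9+ for `readAllBytes`, and the standard metadata endpoint; this is not the plugin's actual code path:

["source","java"]
--------------------------------------------------
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Runs on a GCE instance: fetch the project id the way the new default resolution does.
URL url = new URL("http://metadata.google.internal/computeMetadata/v1/project/project-id");
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
connection.setRequestProperty("Metadata-Flavor", "Google"); // required by the metadata server
connection.setConnectTimeout(500);
connection.setReadTimeout(500);
try (InputStream in = connection.getInputStream()) {
    String projectId = new String(in.readAllBytes(), StandardCharsets.UTF_8);
    System.out.println("derived project id: " + projectId);
}
--------------------------------------------------
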
@@ -486,7 +486,7 @@ If we study the above commands carefully, we can actually see a pattern of how we access data in Elasticsearch:

[source,js]
--------------------------------------------------
-<REST Verb> /<Index>/<Type>/<ID>
+<HTTP Verb> /<Index>/<Type>/<ID>
--------------------------------------------------
// NOTCONSOLE
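
Expressed with the low-level Java REST client, the same pattern maps one-to-one onto a `Request`. A hedged sketch; the `restClient` instance and the concrete index, type, and id are assumed:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;

// <HTTP Verb> /<Index>/<Type>/<ID>  ->  GET /customer/_doc/1
Request request = new Request("GET", "/customer/_doc/1");
Response response = restClient.performRequest(request);
System.out.println(response.getStatusLine());
--------------------------------------------------
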
@@ -26,3 +26,9 @@ has been removed. `missing_bucket` should be used instead.
The object used to share aggregation state between the scripts in a Scripted Metric
Aggregation is now a variable called `state` available in the script context, rather than
being provided via the `params` object as `params._agg`.
+
+[float]
+==== Make metric aggregation script parameters `reduce_script` and `combine_script` mandatory
+
+The metric aggregation has been changed to require these two script parameters to ensure users are
+explicitly defining how their data is processed.
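
A sketch of a builder call that satisfies the new requirement; the script bodies are toy examples and the exact package of `ScriptedMetricAggregationBuilder` is an assumption:

["source","java"]
--------------------------------------------------
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.scripted.ScriptedMetricAggregationBuilder;

// Omitting combineScript or reduceScript now fails with "[combineScript] must not be null".
ScriptedMetricAggregationBuilder agg = AggregationBuilders.scriptedMetric("profit")
    .initScript(new Script("state.transactions = []"))                  // state replaces params._agg
    .mapScript(new Script("state.transactions.add(doc.amount.value)"))
    .combineScript(new Script("double p = 0; for (t in state.transactions) { p += t } return p"))
    .reduceScript(new Script("double p = 0; for (a in states) { p += a } return p"));
--------------------------------------------------
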
@@ -26,6 +26,11 @@ dependencyLicenses {
  mapping from: /google-.*/, to: 'google'
}

+check {
+  // also execute the QA tests when testing the plugin
+  dependsOn 'qa:gce:check'
+}
+
test {
  // this is needed for insecure plugins, remove if possible!
  systemProperty 'tests.artifact', project.name

@@ -0,0 +1 @@
group = "${group}.plugins.discovery-gce.qa"

@@ -0,0 +1,80 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.test.AntFixture

apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'

final int gceNumberOfNodes = 3
File gceDiscoveryFile = new File(project.buildDir, 'generated-resources/nodes.uri')

dependencies {
  testCompile project(path: ':plugins:discovery-gce', configuration: 'runtime')
}

/** A task to start the GCEFixture which emulates a GCE service **/
task gceFixture(type: AntFixture) {
  dependsOn compileTestJava
  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
  executable = new File(project.runtimeJavaHome, 'bin/java')
  args 'org.elasticsearch.cloud.gce.GCEFixture', baseDir, gceDiscoveryFile.getAbsolutePath()
}

Map<String, Object> expansions = [
  'expected_nodes': gceNumberOfNodes
]

processTestResources {
  inputs.properties(expansions)
  MavenFilteringHack.filter(it, expansions)
}

integTestCluster {
  dependsOn gceFixture
  numNodes = gceNumberOfNodes
  plugin ':plugins:discovery-gce'
  setting 'discovery.zen.hosts_provider', 'gce'

  // use gce fixture for Auth calls instead of http://metadata.google.internal
  integTestCluster.environment 'GCE_METADATA_HOST', "http://${-> gceFixture.addressAndPort}"

  // allows to configure hidden settings (`cloud.gce.host` and `cloud.gce.root_url`)
  systemProperty 'es.allow_reroute_gce_settings', 'true'

  // use gce fixture for metadata server calls instead of http://metadata.google.internal
  setting 'cloud.gce.host', "http://${-> gceFixture.addressAndPort}"
  // use gce fixture for API calls instead of https://www.googleapis.com
  setting 'cloud.gce.root_url', "http://${-> gceFixture.addressAndPort}"

  unicastTransportUri = { seedNode, node, ant -> return null }

  waitCondition = { node, ant ->
    gceDiscoveryFile.parentFile.mkdirs()
    gceDiscoveryFile.setText(integTest.nodes.collect { n -> "${n.transportUri()}" }.join('\n'), 'UTF-8')

    File tmpFile = new File(node.cwd, 'wait.success')
    ant.get(src: "http://${node.httpUri()}/",
            dest: tmpFile.toString(),
            ignoreerrors: true,
            retries: 10)
    return tmpFile.exists()
  }
}

@@ -0,0 +1,37 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cloud.gce;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;

public class GCEDiscoveryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    public GCEDiscoveryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
        super(testCandidate);
    }

    @ParametersFactory
    public static Iterable<Object[]> parameters() throws Exception {
        return ESClientYamlSuiteTestCase.createParameters();
    }
}

@@ -0,0 +1,214 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.cloud.gce;

import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.path.PathTrie;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.RestUtils;
import org.elasticsearch.test.fixture.AbstractHttpFixture;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.function.Function;

import static java.nio.charset.StandardCharsets.UTF_8;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

/**
 * {@link GCEFixture} is a fixture that emulates a GCE service.
 */
public class GCEFixture extends AbstractHttpFixture {

    public static final String PROJECT_ID = "discovery-gce-test";
    public static final String ZONE = "test-zone";
    public static final String TOKEN = "1/fFAGRNJru1FTz70BzhT3Zg";
    public static final String TOKEN_TYPE = "Bearer";

    private final PathTrie<RequestHandler> handlers;

    private final Path nodes;

    private GCEFixture(final String workingDir, final String nodesUriPath) {
        super(workingDir);
        this.nodes = toPath(Objects.requireNonNull(nodesUriPath));
        this.handlers = defaultHandlers();
    }

    public static void main(String[] args) throws Exception {
        if (args == null || args.length != 2) {
            throw new IllegalArgumentException("GCEFixture <working directory> <nodes transport uri file>");
        }

        final GCEFixture fixture = new GCEFixture(args[0], args[1]);
        fixture.listen();
    }

    private static String nonAuthPath(Request request) {
        return nonAuthPath(request.getMethod(), request.getPath());
    }

    private static String nonAuthPath(String method, String path) {
        return "NONAUTH " + method + " " + path;
    }

    private static String authPath(Request request) {
        return authPath(request.getMethod(), request.getPath());
    }

    private static String authPath(String method, String path) {
        return "AUTH " + method + " " + path;
    }

    /** Builds the default request handlers **/
    private PathTrie<RequestHandler> defaultHandlers() {
        final PathTrie<RequestHandler> handlers = new PathTrie<>(RestUtils.REST_DECODER);

        final Consumer<Map<String, String>> commonHeaderConsumer = headers -> headers.put("Metadata-Flavor", "Google");

        final Function<String, Response> simpleValue = value -> {
            final Map<String, String> headers = new HashMap<>(TEXT_PLAIN_CONTENT_TYPE);
            commonHeaderConsumer.accept(headers);

            final byte[] responseAsBytes = value.getBytes(StandardCharsets.UTF_8);
            return new Response(RestStatus.OK.getStatus(), headers, responseAsBytes);
        };

        final Function<String, Response> jsonValue = value -> {
            final Map<String, String> headers = new HashMap<>(JSON_CONTENT_TYPE);
            commonHeaderConsumer.accept(headers);

            final byte[] responseAsBytes = value.getBytes(StandardCharsets.UTF_8);
            return new Response(RestStatus.OK.getStatus(), headers, responseAsBytes);
        };

        // https://cloud.google.com/compute/docs/storing-retrieving-metadata
        handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/project/project-id"),
            request -> simpleValue.apply(PROJECT_ID));
        handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/project/attributes/google-compute-default-zone"),
            request -> simpleValue.apply(ZONE));
        // https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances
        handlers.insert(nonAuthPath(HttpGet.METHOD_NAME, "/computeMetadata/v1/instance/service-accounts/default/token"),
            request -> jsonValue.apply(Strings.toString(jsonBuilder()
                .startObject()
                    .field("access_token", TOKEN)
                    .field("expires_in", TimeUnit.HOURS.toSeconds(1))
                    .field("token_type", TOKEN_TYPE)
                .endObject())));

        // https://cloud.google.com/compute/docs/reference/rest/v1/instances
        handlers.insert(authPath(HttpGet.METHOD_NAME, "/compute/v1/projects/{project}/zones/{zone}/instances"),
            request -> {
                final List<Map<String, Object>> items = new ArrayList<>();
                int count = 0;
                for (String address : Files.readAllLines(nodes)) {
                    count++;
                    items.add(MapBuilder.<String, Object>newMapBuilder()
                        .put("id", Long.toString(9309873766405L + count))
                        .put("description", "ES node" + count)
                        .put("name", "test" + count)
                        .put("kind", "compute#instance")
                        .put("machineType", "n1-standard-1")
                        .put("networkInterfaces",
                            Collections.singletonList(MapBuilder.<String, Object>newMapBuilder()
                                .put("accessConfigs", Collections.emptyList())
                                .put("name", "nic0")
                                .put("network", "default")
                                .put("networkIP", address)
                                .immutableMap()))
                        .put("status", "RUNNING")
                        .put("zone", ZONE)
                        .immutableMap());
                }

                final String json = Strings.toString(jsonBuilder()
                    .startObject()
                        .field("id", "test-instances")
                        .field("items", items)
                    .endObject());

                final byte[] responseAsBytes = json.getBytes(StandardCharsets.UTF_8);
                final Map<String, String> headers = new HashMap<>(JSON_CONTENT_TYPE);
                commonHeaderConsumer.accept(headers);
                return new Response(RestStatus.OK.getStatus(), headers, responseAsBytes);
            });
        return handlers;
    }

    @Override
    protected Response handle(final Request request) throws IOException {
        final String nonAuthorizedPath = nonAuthPath(request);
        final RequestHandler nonAuthorizedHandler = handlers.retrieve(nonAuthorizedPath, request.getParameters());
        if (nonAuthorizedHandler != null) {
            return nonAuthorizedHandler.handle(request);
        }

        final String authorizedPath = authPath(request);
        final RequestHandler authorizedHandler = handlers.retrieve(authorizedPath, request.getParameters());
        if (authorizedHandler != null) {
            final String authorization = request.getHeader("Authorization");
            if ((TOKEN_TYPE + " " + TOKEN).equals(authorization) == false) {
                return newError(RestStatus.UNAUTHORIZED, "Authorization", "Login Required");
            }
            return authorizedHandler.handle(request);
        }

        return null;
    }

    private static Response newError(final RestStatus status, final String code, final String message) throws IOException {
        final String response = Strings.toString(jsonBuilder()
            .startObject()
                .field("error", MapBuilder.<String, Object>newMapBuilder()
                    .put("errors", Collections.singletonList(
                        MapBuilder.<String, Object>newMapBuilder()
                            .put("domain", "global")
                            .put("reason", "required")
                            .put("message", message)
                            .put("locationType", "header")
                            .put("location", code)
                            .immutableMap()
                    ))
                    .put("code", status.getStatus())
                    .put("message", message)
                    .immutableMap())
            .endObject());

        return new Response(status.getStatus(), JSON_CONTENT_TYPE, response.getBytes(UTF_8));
    }

    @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here")
    private static Path toPath(final String dir) {
        return Paths.get(dir);
    }
}

@@ -0,0 +1,15 @@
# Integration tests for discovery-gce
setup:
  - do:
      cluster.health:
        wait_for_status: green
        wait_for_nodes: ${expected_nodes}

---
"All nodes are correctly discovered":

  - do:
      nodes.info:
        metric: [ transport ]

  - match: { _nodes.total: ${expected_nodes} }

@@ -75,4 +75,8 @@ public interface GceInstancesService extends Closeable {
     * @return a collection of running instances within the same GCE project
     */
    Collection<Instance> instances();
+
+    String projectId();
+
+    List<String> zones();
}

@@ -29,6 +29,11 @@ import java.util.function.Function;

import com.google.api.client.googleapis.compute.ComputeCredential;
import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport;
+import com.google.api.client.http.GenericUrl;
+import com.google.api.client.http.HttpHeaders;
+import com.google.api.client.http.HttpRequest;
+import com.google.api.client.http.HttpRequestFactory;
+import com.google.api.client.http.HttpResponse;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.http.javanet.NetHttpTransport;
import com.google.api.client.json.JsonFactory;
@@ -103,9 +108,58 @@ public class GceInstancesServiceImpl extends AbstractComponent implements GceInstancesService {

    public GceInstancesServiceImpl(Settings settings) {
        super(settings);
-        this.project = PROJECT_SETTING.get(settings);
-        this.zones = ZONE_SETTING.get(settings);
        this.validateCerts = GCE_VALIDATE_CERTIFICATES.get(settings);
+        this.project = resolveProject();
+        this.zones = resolveZones();
    }

+    private String resolveProject() {
+        if (PROJECT_SETTING.exists(settings)) {
+            return PROJECT_SETTING.get(settings);
+        }
+
+        try {
+            // this code is based on a private GCE method: {@link com.google.cloud.ServiceOptions#getAppEngineProjectIdFromMetadataServer()}
+            return getAppEngineValueFromMetadataServer("/computeMetadata/v1/project/project-id");
+        } catch (Exception e) {
+            logger.warn("unable to resolve project from metadata server for GCE discovery service", e);
+        }
+        return null;
+    }
+
+    private List<String> resolveZones() {
+        if (ZONE_SETTING.exists(settings)) {
+            return ZONE_SETTING.get(settings);
+        }
+
+        try {
+            final String defaultZone =
+                getAppEngineValueFromMetadataServer("/computeMetadata/v1/project/attributes/google-compute-default-zone");
+            return Collections.singletonList(defaultZone);
+        } catch (Exception e) {
+            logger.warn("unable to resolve default zone from metadata server for GCE discovery service", e);
+        }
+        return null;
+    }
+
+    String getAppEngineValueFromMetadataServer(String serviceURL) throws GeneralSecurityException, IOException {
+        String metadata = GceMetadataService.GCE_HOST.get(settings);
+        GenericUrl url = Access.doPrivileged(() -> new GenericUrl(metadata + serviceURL));
+
+        HttpTransport httpTransport = getGceHttpTransport();
+        HttpRequestFactory requestFactory = httpTransport.createRequestFactory();
+        HttpRequest request = requestFactory.buildGetRequest(url)
+            .setConnectTimeout(500)
+            .setReadTimeout(500)
+            .setHeaders(new HttpHeaders().set("Metadata-Flavor", "Google"));
+        HttpResponse response = Access.doPrivilegedIOException(() -> request.execute());
+        return headerContainsMetadataFlavor(response) ? response.parseAsString() : null;
+    }
+
+    private static boolean headerContainsMetadataFlavor(HttpResponse response) {
+        // this code is based on a private GCE method: {@link com.google.cloud.ServiceOptions#headerContainsMetadataFlavor(HttpResponse)}
+        String metadataFlavorValue = response.getHeaders().getFirstHeaderStringValue("Metadata-Flavor");
+        return "Google".equals(metadataFlavorValue);
+    }
+
    protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurityException, IOException {
@@ -180,6 +234,16 @@ public class GceInstancesServiceImpl extends AbstractComponent implements GceInstancesService {
        return this.client;
    }

+    @Override
+    public String projectId() {
+        return project;
+    }
+
+    @Override
+    public List<String> zones() {
+        return zones;
+    }
+
    @Override
    public void close() throws IOException {
        if (gceHttpTransport != null) {

|
@ -79,8 +79,8 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas
|
|||
this.networkService = networkService;
|
||||
|
||||
this.refreshInterval = GceInstancesService.REFRESH_SETTING.get(settings);
|
||||
this.project = GceInstancesService.PROJECT_SETTING.get(settings);
|
||||
this.zones = GceInstancesService.ZONE_SETTING.get(settings);
|
||||
this.project = gceInstancesService.projectId();
|
||||
this.zones = gceInstancesService.zones();
|
||||
|
||||
this.tags = TAGS_SETTING.get(settings);
|
||||
if (logger.isDebugEnabled()) {
|
||||
|
|
|
@@ -22,6 +22,7 @@ package org.elasticsearch.plugin.discovery.gce;
import com.google.api.client.http.HttpHeaders;
import com.google.api.client.util.ClassInfo;
import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.Booleans;
import org.elasticsearch.core.internal.io.IOUtils;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.cloud.gce.GceInstancesService;

@@ -41,6 +42,7 @@ import org.elasticsearch.transport.TransportService;

import java.io.Closeable;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -49,8 +51,12 @@ import java.util.function.Supplier;

public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {

+    /** Determines whether settings that reroute GCE calls should be allowed (for testing purposes only). */
+    private static final boolean ALLOW_REROUTE_GCE_SETTINGS =
+        Booleans.parseBoolean(System.getProperty("es.allow_reroute_gce_settings", "false"));
+
    public static final String GCE = "gce";
-    private final Settings settings;
+    protected final Settings settings;
    private static final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class);
    // stashed when created in order to properly close
    private final SetOnce<GceInstancesService> gceInstancesService = new SetOnce<>();

@@ -94,14 +100,22 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Closeable {

    @Override
    public List<Setting<?>> getSettings() {
-        return Arrays.asList(
-            // Register GCE settings
-            GceInstancesService.PROJECT_SETTING,
-            GceInstancesService.ZONE_SETTING,
-            GceUnicastHostsProvider.TAGS_SETTING,
-            GceInstancesService.REFRESH_SETTING,
-            GceInstancesService.RETRY_SETTING,
-            GceInstancesService.MAX_WAIT_SETTING);
+        List<Setting<?>> settings = new ArrayList<>(
+            Arrays.asList(
+                // Register GCE settings
+                GceInstancesService.PROJECT_SETTING,
+                GceInstancesService.ZONE_SETTING,
+                GceUnicastHostsProvider.TAGS_SETTING,
+                GceInstancesService.REFRESH_SETTING,
+                GceInstancesService.RETRY_SETTING,
+                GceInstancesService.MAX_WAIT_SETTING)
+        );
+
+        if (ALLOW_REROUTE_GCE_SETTINGS) {
+            settings.add(GceMetadataService.GCE_HOST);
+            settings.add(GceInstancesServiceImpl.GCE_ROOT_URL);
+        }
+        return Collections.unmodifiableList(settings);
    }

@@ -0,0 +1,72 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.cloud.gce;

import com.google.api.client.http.HttpTransport;
import com.google.api.client.http.LowLevelHttpRequest;
import com.google.api.client.http.LowLevelHttpResponse;
import com.google.api.client.json.Json;
import com.google.api.client.testing.http.MockHttpTransport;
import com.google.api.client.testing.http.MockLowLevelHttpRequest;
import com.google.api.client.testing.http.MockLowLevelHttpResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

import java.util.concurrent.atomic.AtomicBoolean;

import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.core.Is.is;

public class GceInstancesServiceImplTests extends ESTestCase {

    public void testHeaderContainsMetadataFlavor() throws Exception {
        final AtomicBoolean addMetadataFlavor = new AtomicBoolean();
        final MockHttpTransport transport = new MockHttpTransport() {
            @Override
            public LowLevelHttpRequest buildRequest(String method, final String url) {
                return new MockLowLevelHttpRequest() {
                    @Override
                    public LowLevelHttpResponse execute() {
                        MockLowLevelHttpResponse response = new MockLowLevelHttpResponse();
                        response.setStatusCode(200);
                        response.setContentType(Json.MEDIA_TYPE);
                        response.setContent("value");
                        if (addMetadataFlavor.get()) {
                            response.addHeader("Metadata-Flavor", "Google");
                        }
                        return response;
                    }
                };
            }
        };

        final GceInstancesServiceImpl service = new GceInstancesServiceImpl(Settings.EMPTY) {
            @Override
            protected synchronized HttpTransport getGceHttpTransport() {
                return transport;
            }
        };

        final String serviceURL = "/computeMetadata/v1/project/project-id";
        assertThat(service.getAppEngineValueFromMetadataServer(serviceURL), is(nullValue()));

        addMetadataFlavor.set(true);
        assertThat(service.getAppEngineValueFromMetadataServer(serviceURL), is("value"));
    }
}

@@ -170,6 +170,16 @@ public class GceDiscoverTests extends ESIntegTestCase {
        });
    }

+    @Override
+    public String projectId() {
+        return PROJECT_SETTING.get(settings);
+    }
+
+    @Override
+    public List<String> zones() {
+        return ZONE_SETTING.get(settings);
+    }
+
    @Override
    public void close() throws IOException {
    }

@@ -21,6 +21,7 @@ package org.elasticsearch.discovery.gce;

import org.elasticsearch.Version;
import org.elasticsearch.cloud.gce.GceInstancesServiceImpl;
+import org.elasticsearch.cloud.gce.GceMetadataService;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;

@@ -40,6 +41,7 @@ import java.util.Locale;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.not;

/**
 * This test class uses a GCE HTTP Mock system which allows to simulate JSON Responses.
@@ -211,7 +213,10 @@ public class GceDiscoveryTests extends ESTestCase {
    }

    public void testIllegalSettingsMissingAllRequired() {
-        Settings nodeSettings = Settings.EMPTY;
+        Settings nodeSettings = Settings.builder()
+            // to prevent being resolved using the default GCE host
+            .put(GceMetadataService.GCE_HOST.getKey(), "http://internal")
+            .build();
        mock = new GceInstancesServiceMock(nodeSettings);
        try {
            buildDynamicNodes(mock, nodeSettings);
@@ -223,6 +228,8 @@ public class GceDiscoveryTests extends ESTestCase {

    public void testIllegalSettingsMissingProject() {
        Settings nodeSettings = Settings.builder()
+            // to prevent being resolved using the default GCE host
+            .put(GceMetadataService.GCE_HOST.getKey(), "http://internal")
            .putList(GceInstancesServiceImpl.ZONE_SETTING.getKey(), "us-central1-a", "us-central1-b")
            .build();
        mock = new GceInstancesServiceMock(nodeSettings);
@@ -236,6 +243,8 @@ public class GceDiscoveryTests extends ESTestCase {

    public void testIllegalSettingsMissingZone() {
        Settings nodeSettings = Settings.builder()
+            // to prevent being resolved using the default GCE host
+            .put(GceMetadataService.GCE_HOST.getKey(), "http://internal")
            .put(GceInstancesServiceImpl.PROJECT_SETTING.getKey(), projectName)
            .build();
        mock = new GceInstancesServiceMock(nodeSettings);
@@ -261,4 +270,13 @@ public class GceDiscoveryTests extends ESTestCase {
        List<TransportAddress> dynamicHosts = buildDynamicNodes(mock, nodeSettings);
        assertThat(dynamicHosts, hasSize(1));
    }
+
+    public void testMetadataServerValues() {
+        Settings nodeSettings = Settings.EMPTY;
+        mock = new GceInstancesServiceMock(nodeSettings);
+        assertThat(mock.projectId(), not(projectName));
+
+        List<TransportAddress> dynamicHosts = buildDynamicNodes(mock, nodeSettings);
+        assertThat(dynamicHosts, hasSize(1));
+    }
}

@@ -32,11 +32,13 @@ public class GceInstancesServiceMock extends GceInstancesServiceImpl {

    public GceInstancesServiceMock(Settings settings) {
        super(settings);
-        this.mockHttpTransport = GceMockUtils.configureMock();
    }

    @Override
    protected HttpTransport getGceHttpTransport() throws GeneralSecurityException, IOException {
+        if (this.mockHttpTransport == null) {
+            this.mockHttpTransport = GceMockUtils.configureMock();
+        }
        return this.mockHttpTransport;
    }
}

@@ -39,7 +39,7 @@ import java.net.URL;
public class GceMockUtils {
    protected static final Logger logger = Loggers.getLogger(GceMockUtils.class);

-    public static final String GCE_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/instance";
+    public static final String GCE_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/";

    protected static HttpTransport configureMock() {
        return new MockHttpTransport() {
@@ -54,6 +54,7 @@ public class GceMockUtils {
            if (url.startsWith(GCE_METADATA_URL)) {
                logger.info("--> Simulate GCE Auth/Metadata response for [{}]", url);
                response.setContent(readGoogleInternalJsonResponse(url));
+                response.addHeader("Metadata-Flavor", "Google");
            } else {
                logger.info("--> Simulate GCE API response for [{}]", url);
                response.setContent(readGoogleApiJsonResponse(url));

@@ -0,0 +1,36 @@
{
  "id": "dummy",
  "items": [
    {
      "description": "ES Node 1",
      "id": "9309873766428965105",
      "kind": "compute#instance",
      "machineType": "n1-standard-1",
      "name": "test1",
      "networkInterfaces": [
        {
          "accessConfigs": [
            {
              "kind": "compute#accessConfig",
              "name": "External NAT",
              "natIP": "104.155.13.147",
              "type": "ONE_TO_ONE_NAT"
            }
          ],
          "name": "nic0",
          "network": "default",
          "networkIP": "10.240.79.59"
        }
      ],
      "status": "RUNNING",
      "tags": {
        "fingerprint": "xA6QJb-rGtg=",
        "items": [
          "elasticsearch",
          "dev"
        ]
      },
      "zone": "europe-west1-b"
    }
  ]
}

@@ -0,0 +1,36 @@
{
  "id": "dummy",
  "items": [
    {
      "description": "ES Node 2",
      "id": "9309873766428965105",
      "kind": "compute#instance",
      "machineType": "n1-standard-1",
      "name": "test2",
      "networkInterfaces": [
        {
          "accessConfigs": [
            {
              "kind": "compute#accessConfig",
              "name": "External NAT",
              "natIP": "104.155.13.147",
              "type": "ONE_TO_ONE_NAT"
            }
          ],
          "name": "nic0",
          "network": "default",
          "networkIP": "10.240.79.59"
        }
      ],
      "status": "RUNNING",
      "tags": {
        "fingerprint": "xA6QJb-rGtg=",
        "items": [
          "elasticsearch",
          "dev"
        ]
      },
      "zone": "us-central1-a"
    }
  ]
}

@@ -0,0 +1 @@
europe-west1-b

@@ -0,0 +1 @@
metadataserver

@@ -0,0 +1,25 @@
{
  "delete_by_query_rethrottle": {
    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html",
    "methods": ["POST"],
    "url": {
      "path": "/_delete_by_query/{task_id}/_rethrottle",
      "paths": ["/_delete_by_query/{task_id}/_rethrottle"],
      "parts": {
        "task_id": {
          "type": "string",
          "required": true,
          "description": "The task id to rethrottle"
        }
      },
      "params": {
        "requests_per_second": {
          "type": "number",
          "required": true,
          "description": "The throttle to set on this request in floating sub-requests per second. -1 means set no throttle."
        }
      }
    },
    "body": null
  }
}
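
For reference, hitting the new endpoint from the low-level Java REST client looks roughly like this sketch (the `restClient` instance and the task id are made up):

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;

// POST /_delete_by_query/{task_id}/_rethrottle?requests_per_second=10
Request request = new Request("POST", "/_delete_by_query/oTUltX4IQMOUUVeiohTt8A:12345/_rethrottle");
request.addParameter("requests_per_second", "10"); // "-1" would remove the throttle entirely
Response response = restClient.performRequest(request);
System.out.println(response.getStatusLine());
--------------------------------------------------
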
@@ -4,7 +4,7 @@
    "methods": ["POST"],
    "url": {
      "path": "/_reindex/{task_id}/_rethrottle",
-      "paths": ["/_reindex/{task_id}/_rethrottle", "/_update_by_query/{task_id}/_rethrottle", "/_delete_by_query/{task_id}/_rethrottle"],
+      "paths": ["/_reindex/{task_id}/_rethrottle"],
      "parts": {
        "task_id": {
          "type": "string",

@@ -0,0 +1,25 @@
{
  "update_by_query_rethrottle": {
    "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html",
    "methods": ["POST"],
    "url": {
      "path": "/_update_by_query/{task_id}/_rethrottle",
      "paths": ["/_update_by_query/{task_id}/_rethrottle"],
      "parts": {
        "task_id": {
          "type": "string",
          "required": true,
          "description": "The task id to rethrottle"
        }
      },
      "params": {
        "requests_per_second": {
          "type": "number",
          "required": true,
          "description": "The throttle to set on this request in floating sub-requests per second. -1 means set no throttle."
        }
      }
    },
    "body": null
  }
}

@@ -24,6 +24,7 @@ import com.carrotsearch.hppc.ObjectObjectMap;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSettings;

@@ -32,6 +33,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
+import java.util.LinkedList;
import java.util.List;
import java.util.Set;

@@ -454,13 +456,40 @@ public abstract class ParseContext implements Iterable<ParseContext.Document> {
    }

    void postParse() {
-        // reverse the order of docs for nested docs support, parent should be last
        if (documents.size() > 1) {
            docsReversed = true;
-            Collections.reverse(documents);
+            if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_5_0)) {
+                /**
+                 * For indices created on or after {@link Version#V_6_5_0} we preserve the order
+                 * of the children while ensuring that parents appear after them.
+                 */
+                List<Document> newDocs = reorderParent(documents);
+                documents.clear();
+                documents.addAll(newDocs);
+            } else {
+                // reverse the order of docs for nested docs support, parent should be last
+                Collections.reverse(documents);
+            }
        }
    }

+    /**
+     * Returns a copy of the provided {@link List} where parent documents appear
+     * after their children.
+     */
+    private List<Document> reorderParent(List<Document> docs) {
+        List<Document> newDocs = new ArrayList<>(docs.size());
+        LinkedList<Document> parents = new LinkedList<>();
+        for (Document doc : docs) {
+            while (parents.peek() != doc.getParent()) {
+                newDocs.add(parents.poll());
+            }
+            parents.add(0, doc);
+        }
+        newDocs.addAll(parents);
+        return newDocs;
+    }
+
    @Override
    public Iterator<Document> iterator() {
        return documents.iterator();
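
To make the new ordering concrete, here is a hedged, self-contained re-implementation of the same stack-based walk over plain strings (toy parent pointers instead of real `Document` objects):

["source","java"]
--------------------------------------------------
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;
import java.util.Objects;

public class ReorderParentDemo {
    public static void main(String[] args) {
        // Docs in parse order (parents first); parent[i] is doc i's parent index, -1 for the root.
        String[] docs   = { "root", "p1", "c1a", "c1b", "p2", "c2a" };
        int[]    parent = {   -1,     0,    1,     1,    0,    4   };

        List<String> out = new ArrayList<>();
        Deque<Integer> stack = new ArrayDeque<>();   // mirrors the LinkedList used as a stack
        for (int i = 0; i < docs.length; i++) {
            Integer parentOfDoc = parent[i] < 0 ? null : parent[i];
            // pop entries until the doc's parent is on top: popped docs have all their children emitted
            while (!Objects.equals(stack.peek(), parentOfDoc)) {
                out.add(docs[stack.pop()]);
            }
            stack.push(i);
        }
        while (!stack.isEmpty()) {
            out.add(docs[stack.pop()]);              // drain remaining ancestors, root comes out last
        }
        System.out.println(out);                     // [c1a, c1b, p1, c2a, p2, root]
    }
}
--------------------------------------------------

Children keep their source order, and every parent, including the root, is emitted only after all of its descendants.
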
@@ -21,6 +21,9 @@ package org.elasticsearch.indices;

import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.ObjectSet;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.Accountable;

@@ -75,6 +78,8 @@ public final class IndicesRequestCache extends AbstractComponent implements RemovalListener {
    public static final Setting<TimeValue> INDICES_CACHE_QUERY_EXPIRE =
        Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), Property.NodeScope);

+    private static final Logger LOGGER = LogManager.getLogger(IndicesRequestCache.class);
+
    private final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
    private final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet();
    private final ByteSizeValue size;
@@ -109,13 +114,19 @@ public final class IndicesRequestCache extends AbstractComponent implements RemovalListener {
        notification.getKey().entity.onRemoval(notification);
    }

+    // NORELEASE The cacheKeyRenderer has been added in order to debug
+    // https://github.com/elastic/elasticsearch/issues/32827, it should be
+    // removed when this issue is solved
    BytesReference getOrCompute(CacheEntity cacheEntity, Supplier<BytesReference> loader,
-            DirectoryReader reader, BytesReference cacheKey) throws Exception {
+            DirectoryReader reader, BytesReference cacheKey, Supplier<String> cacheKeyRenderer) throws Exception {
        final Key key = new Key(cacheEntity, reader.getVersion(), cacheKey);
        Loader cacheLoader = new Loader(cacheEntity, loader);
        BytesReference value = cache.computeIfAbsent(key, cacheLoader);
        if (cacheLoader.isLoaded()) {
            key.entity.onMiss();
+            if (logger.isTraceEnabled()) {
+                logger.trace("Cache miss for reader version [{}] and request:\n {}", reader.getVersion(), cacheKeyRenderer.get());
+            }
            // see if it's the first time we see this reader, and make sure to register a cleanup key
            CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getVersion());
            if (!registeredClosedListeners.containsKey(cleanupKey)) {

@@ -126,6 +137,9 @@ public final class IndicesRequestCache extends AbstractComponent implements RemovalListener {
            }
        } else {
            key.entity.onHit();
+            if (logger.isTraceEnabled()) {
+                logger.trace("Cache hit for reader version [{}] and request:\n {}", reader.getVersion(), cacheKeyRenderer.get());
+            }
        }
        return value;
    }
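
The extra `Supplier<String>` argument is the usual lazy-rendering idiom: the potentially expensive request rendering only happens when trace logging is actually enabled. A framework-free sketch of the idea (the `traceEnabled` flag stands in for `logger.isTraceEnabled()`):

["source","java"]
--------------------------------------------------
import java.util.function.Supplier;

boolean traceEnabled = Boolean.getBoolean("demo.trace");
Supplier<String> cacheKeyRenderer = () -> "Shard: [idx][0]\nSource:\n{\"query\":{\"match_all\":{}}}";

if (traceEnabled) {
    // cacheKeyRenderer.get() runs only here; cache hits and misses on the hot path pay nothing for it.
    System.out.println("Cache miss for request:\n " + cacheKeyRenderer.get());
}
--------------------------------------------------
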
@@ -1191,7 +1191,9 @@ public class IndicesService extends AbstractLifecycleComponent
        final DirectoryReader directoryReader = context.searcher().getDirectoryReader();

        boolean[] loadedFromCache = new boolean[] { true };
-        BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> {
+        BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), () -> {
+            return "Shard: " + request.shardId() + "\nSource:\n" + request.source();
+        }, out -> {
            queryPhase.execute(context);
            try {
                context.queryResult().writeToNoId(out);
@@ -1217,6 +1219,10 @@ public class IndicesService extends AbstractLifecycleComponent
            // running a search that times out concurrently will likely timeout again if it's run while we have this `stale` result in the
            // cache. One other option is to not cache requests with a timeout at all...
            indicesRequestCache.invalidate(new IndexShardCacheEntity(context.indexShard()), directoryReader, request.cacheKey());
+            if (logger.isTraceEnabled()) {
+                logger.trace("Query timed out, invalidating cache entry for request on shard [{}]:\n {}", request.shardId(),
+                    request.source());
+            }
        }
    }

@@ -1232,8 +1238,8 @@ public class IndicesService extends AbstractLifecycleComponent
     * @param loader loads the data into the cache if needed
     * @return the contents of the cache or the result of calling the loader
     */
-    private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader reader, BytesReference cacheKey, Consumer<StreamOutput> loader)
-            throws Exception {
+    private BytesReference cacheShardLevelResult(IndexShard shard, DirectoryReader reader, BytesReference cacheKey,
+            Supplier<String> cacheKeyRenderer, Consumer<StreamOutput> loader) throws Exception {
        IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard);
        Supplier<BytesReference> supplier = () -> {
            /* BytesStreamOutput allows to pass the expected size but by default uses

@@ -1251,7 +1257,7 @@ public class IndicesService extends AbstractLifecycleComponent
                return out.bytes();
            }
        };
-        return indicesRequestCache.getOrCompute(cacheEntity, supplier, reader, cacheKey);
+        return indicesRequestCache.getOrCompute(cacheEntity, supplier, reader, cacheKey, cacheKeyRenderer);
    }

    static final class IndexShardCacheEntity extends AbstractIndexShardCacheEntity {

@@ -196,6 +196,14 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder {
    protected ScriptedMetricAggregatorFactory doBuild(SearchContext context, AggregatorFactory<?> parent,
            Builder subfactoriesBuilder) throws IOException {

+        if (combineScript == null) {
+            throw new IllegalArgumentException("[combineScript] must not be null: [" + name + "]");
+        }
+
+        if (reduceScript == null) {
+            throw new IllegalArgumentException("[reduceScript] must not be null: [" + name + "]");
+        }
+
        QueryShardContext queryShardContext = context.getQueryShardContext();

        // Extract params from scripts and pass them along to ScriptedMetricAggregatorFactory, since it won't have
@@ -215,16 +223,14 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder {
            ScriptedMetricAggContexts.MapScript.CONTEXT);
        Map<String, Object> mapScriptParams = mapScript.getParams();

-        ScriptedMetricAggContexts.CombineScript.Factory compiledCombineScript;
-        Map<String, Object> combineScriptParams;
-        if (combineScript != null) {
-            compiledCombineScript = queryShardContext.getScriptService().compile(combineScript,
-                ScriptedMetricAggContexts.CombineScript.CONTEXT);
-            combineScriptParams = combineScript.getParams();
-        } else {
-            compiledCombineScript = (p, a) -> null;
-            combineScriptParams = Collections.emptyMap();
-        }
+        ScriptedMetricAggContexts.CombineScript.Factory compiledCombineScript = queryShardContext.getScriptService()
+            .compile(combineScript, ScriptedMetricAggContexts.CombineScript.CONTEXT);
+        Map<String, Object> combineScriptParams = combineScript.getParams();

        return new ScriptedMetricAggregatorFactory(name, compiledMapScript, mapScriptParams, compiledInitScript,
            initScriptParams, compiledCombineScript, combineScriptParams, reduceScript,
            params, queryShardContext.lookup(), context, parent, subfactoriesBuilder, metaData);

@@ -30,6 +30,7 @@ import org.apache.lucene.search.TotalHits.Relation;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.document.DocumentField;

@@ -38,6 +39,7 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor;
import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
import org.elasticsearch.index.mapper.DocumentMapper;

@@ -344,6 +346,7 @@ public class FetchPhase implements SearchPhase {
        ObjectMapper current = nestedObjectMapper;
        String originalName = nestedObjectMapper.name();
        SearchHit.NestedIdentity nestedIdentity = null;
+        final IndexSettings indexSettings = context.getQueryShardContext().getIndexSettings();
        do {
            Query parentFilter;
            nestedParentObjectMapper = current.getParentObjectMapper(mapperService);
@@ -374,12 +377,32 @@ public class FetchPhase implements SearchPhase {
            BitSet parentBits = context.bitsetFilterCache().getBitSetProducer(parentFilter).getBitSet(subReaderContext);

            int offset = 0;
-            int nextParent = parentBits.nextSetBit(currentParent);
-            for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS;
-                    docId = childIter.nextDoc()) {
-                offset++;
-            }
-            currentParent = nextParent;
+            if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_5_0)) {
+                /**
+                 * Starts from the previous parent and finds the offset of the
+                 * <code>nestedSubDocID</code> within the nested children. Nested documents
+                 * are indexed in the same order as in the source array, so the offset
+                 * of a nested child is the number of nested documents with the same parent
+                 * that appear before it.
+                 */
+                int previousParent = parentBits.prevSetBit(currentParent);
+                for (int docId = childIter.advance(previousParent + 1); docId < nestedSubDocId && docId != DocIdSetIterator.NO_MORE_DOCS;
+                        docId = childIter.nextDoc()) {
+                    offset++;
+                }
+                currentParent = nestedSubDocId;
+            } else {
+                /**
+                 * Nested documents are in reverse order in this version, so we start from the current nested document
+                 * and count the documents with the same parent that appear after it.
+                 */
+                int nextParent = parentBits.nextSetBit(currentParent);
+                for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS;
+                        docId = childIter.nextDoc()) {
+                    offset++;
+                }
+                currentParent = nextParent;
+            }
            current = nestedObjectMapper = nestedParentObjectMapper;
            int currentPrefix = current == null ? 0 : current.name().length() + 1;
            nestedIdentity = new SearchHit.NestedIdentity(originalName.substring(currentPrefix), offset, nestedIdentity);
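
A toy walk-through of the post-6.5 offset computation (hedged: plain arrays stand in for Lucene's `BitSet` and doc-id iterator):

["source","java"]
--------------------------------------------------
// Block of docs 0..7: children 0-2 belong to the parent at 3, children 4-6 to the parent at 7.
boolean[] parentBits = { false, false, false, true, false, false, false, true };
int nestedSubDocId = 5;                        // the nested hit we want to locate

// prevSetBit(nestedSubDocId): the last parent before this child, or -1 if none.
int previousParent = -1;
for (int i = nestedSubDocId; i >= 0; i--) {
    if (parentBits[i]) { previousParent = i; break; }
}

// Count earlier siblings: children strictly between the previous parent and the hit itself.
int offset = 0;
for (int docId = previousParent + 1; docId < nestedSubDocId; docId++) {
    if (parentBits[docId] == false) {
        offset++;
    }
}
System.out.println(offset);                    // 1 -> doc 5 is the second child of the parent at 7
--------------------------------------------------
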
@@ -389,28 +389,28 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
        assertEquals(6, doc.docs().size());

        Document nested = doc.docs().get(0);
-        assertFieldValue(nested, "n1.n2.target", 7L);
+        assertFieldValue(nested, "n1.n2.target", 3L);
        assertFieldValue(nested, "n1.target");
        assertFieldValue(nested, "target");

-        nested = doc.docs().get(2);
+        nested = doc.docs().get(1);
        assertFieldValue(nested, "n1.n2.target", 5L);
        assertFieldValue(nested, "n1.target");
        assertFieldValue(nested, "target");

        nested = doc.docs().get(3);
-        assertFieldValue(nested, "n1.n2.target", 3L);
+        assertFieldValue(nested, "n1.n2.target", 7L);
        assertFieldValue(nested, "n1.target");
        assertFieldValue(nested, "target");

-        Document parent = doc.docs().get(1);
+        Document parent = doc.docs().get(2);
        assertFieldValue(parent, "target");
-        assertFieldValue(parent, "n1.target", 7L);
+        assertFieldValue(parent, "n1.target", 3L, 5L);
        assertFieldValue(parent, "n1.n2.target");

        parent = doc.docs().get(4);
        assertFieldValue(parent, "target");
-        assertFieldValue(parent, "n1.target", 3L, 5L);
+        assertFieldValue(parent, "n1.target", 7L);
        assertFieldValue(parent, "n1.n2.target");

        Document root = doc.docs().get(5);

@@ -21,6 +21,8 @@ package org.elasticsearch.index.mapper;

import java.util.HashSet;
import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;

@@ -33,6 +35,7 @@ import org.elasticsearch.index.mapper.ObjectMapper.Dynamic;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
+import org.elasticsearch.test.VersionUtils;

import java.io.IOException;
import java.io.UncheckedIOException;

@@ -120,11 +123,11 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {

        assertThat(doc.docs().size(), equalTo(3));
        assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
-        assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("3"));
-        assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("4"));
+        assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1"));
+        assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2"));
        assertThat(doc.docs().get(1).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
-        assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1"));
-        assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("2"));
+        assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("3"));
+        assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("4"));

        assertThat(doc.docs().get(2).get("field"), equalTo("value"));
    }
@@ -160,20 +163,20 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
            XContentType.JSON));

        assertThat(doc.docs().size(), equalTo(7));
-        assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+        assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("2"));
        assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
        assertThat(doc.docs().get(0).get("field"), nullValue());
-        assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+        assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("3"));
        assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
        assertThat(doc.docs().get(1).get("field"), nullValue());
-        assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
+        assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("1"));
        assertThat(doc.docs().get(2).get("nested1.nested2.field2"), nullValue());
        assertThat(doc.docs().get(2).get("field"), nullValue());
-        assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+        assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("5"));
        assertThat(doc.docs().get(3).get("field"), nullValue());
-        assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+        assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("6"));
        assertThat(doc.docs().get(4).get("field"), nullValue());
-        assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
+        assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("4"));
        assertThat(doc.docs().get(5).get("nested1.nested2.field2"), nullValue());
        assertThat(doc.docs().get(5).get("field"), nullValue());
        assertThat(doc.docs().get(6).get("field"), equalTo("value"));
@@ -212,21 +215,21 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
            XContentType.JSON));

        assertThat(doc.docs().size(), equalTo(7));
-        assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+        assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("2"));
        assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
        assertThat(doc.docs().get(0).get("field"), nullValue());
-        assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+        assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("3"));
        assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
        assertThat(doc.docs().get(1).get("field"), nullValue());
-        assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
-        assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("5"));
+        assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("1"));
+        assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("2"));
        assertThat(doc.docs().get(2).get("field"), nullValue());
-        assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+        assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("5"));
        assertThat(doc.docs().get(3).get("field"), nullValue());
-        assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+        assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("6"));
        assertThat(doc.docs().get(4).get("field"), nullValue());
-        assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
-        assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("2"));
+        assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("4"));
+        assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("5"));
        assertThat(doc.docs().get(5).get("field"), nullValue());
        assertThat(doc.docs().get(6).get("field"), equalTo("value"));
        assertThat(doc.docs().get(6).get("nested1.field1"), nullValue());
@@ -264,21 +267,21 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
            XContentType.JSON));

        assertThat(doc.docs().size(), equalTo(7));
-        assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
+        assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("2"));
        assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
        assertThat(doc.docs().get(0).get("field"), nullValue());
-        assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
+        assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("3"));
        assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
        assertThat(doc.docs().get(1).get("field"), nullValue());
-        assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
-        assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("5"));
+        assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("1"));
+        assertThat(doc.docs().get(2).get("nested1.nested2.field2"), equalTo("2"));
        assertThat(doc.docs().get(2).get("field"), nullValue());
-        assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
+        assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("5"));
        assertThat(doc.docs().get(3).get("field"), nullValue());
-        assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
+        assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("6"));
        assertThat(doc.docs().get(4).get("field"), nullValue());
-        assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
-        assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("2"));
+        assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("4"));
+        assertThat(doc.docs().get(5).get("nested1.nested2.field2"), equalTo("5"));
        assertThat(doc.docs().get(5).get("field"), nullValue());
        assertThat(doc.docs().get(6).get("field"), equalTo("value"));
        assertThat(doc.docs().get(6).getFields("nested1.field1").length, equalTo(2));
@ -316,20 +319,20 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
|
|||
XContentType.JSON));
|
||||
|
||||
assertThat(doc.docs().size(), equalTo(7));
|
||||
assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("6"));
|
||||
assertThat(doc.docs().get(0).get("nested1.nested2.field2"), equalTo("2"));
|
||||
assertThat(doc.docs().get(0).get("nested1.field1"), nullValue());
|
||||
assertThat(doc.docs().get(0).get("field"), nullValue());
|
||||
assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("5"));
|
||||
assertThat(doc.docs().get(1).get("nested1.nested2.field2"), equalTo("3"));
|
||||
assertThat(doc.docs().get(1).get("nested1.field1"), nullValue());
|
||||
assertThat(doc.docs().get(1).get("field"), nullValue());
|
||||
assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("4"));
|
||||
assertThat(doc.docs().get(2).get("nested1.field1"), equalTo("1"));
|
||||
assertThat(doc.docs().get(2).get("nested1.nested2.field2"), nullValue());
|
||||
assertThat(doc.docs().get(2).get("field"), nullValue());
|
||||
assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("3"));
|
||||
assertThat(doc.docs().get(3).get("nested1.nested2.field2"), equalTo("5"));
|
||||
assertThat(doc.docs().get(3).get("field"), nullValue());
|
||||
assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("2"));
|
||||
assertThat(doc.docs().get(4).get("nested1.nested2.field2"), equalTo("6"));
|
||||
assertThat(doc.docs().get(4).get("field"), nullValue());
|
||||
assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("1"));
|
||||
assertThat(doc.docs().get(5).get("nested1.field1"), equalTo("4"));
|
||||
assertThat(doc.docs().get(5).get("nested1.nested2.field2"), nullValue());
|
||||
assertThat(doc.docs().get(5).get("field"), nullValue());
|
||||
assertThat(doc.docs().get(6).get("field"), equalTo("value"));
|
||||
|
@ -424,9 +427,9 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
|
|||
XContentType.JSON));
|
||||
|
||||
assertThat(doc.docs().size(), equalTo(3));
|
||||
assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("4"));
|
||||
assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1"));
|
||||
assertThat(doc.docs().get(0).get("field"), nullValue());
|
||||
assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1"));
|
||||
assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("4"));
|
||||
assertThat(doc.docs().get(1).get("field"), nullValue());
|
||||
assertThat(doc.docs().get(2).get("field"), equalTo("value"));
|
||||
}
|
||||
|
@ -634,4 +637,63 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
|
|||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean forbidPrivateIndexSettings() {
|
||||
/**
|
||||
* This is needed to force the index version with {@link IndexMetaData.SETTING_INDEX_VERSION_CREATED}.
|
||||
*/
|
||||
return false;
|
||||
}
|
||||
|
||||
public void testReorderParentBWC() throws IOException {
|
||||
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("nested1").field("type", "nested").endObject()
|
||||
.endObject().endObject().endObject());
|
||||
|
||||
Version bwcVersion = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0,
|
||||
Version.V_6_5_0);
|
||||
for (Version version : new Version[] {Version.V_6_5_0, bwcVersion}) {
|
||||
DocumentMapper docMapper = createIndex("test-" + version,
|
||||
Settings.builder().put(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey(), version).build())
|
||||
.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
|
||||
|
||||
assertThat(docMapper.hasNestedObjects(), equalTo(true));
|
||||
ObjectMapper nested1Mapper = docMapper.objectMappers().get("nested1");
|
||||
assertThat(nested1Mapper.nested().isNested(), equalTo(true));
|
||||
|
||||
ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "1", BytesReference
|
||||
.bytes(XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
.field("field", "value")
|
||||
.startArray("nested1")
|
||||
.startObject()
|
||||
.field("field1", "1")
|
||||
.field("field2", "2")
|
||||
.endObject()
|
||||
.startObject()
|
||||
.field("field1", "3")
|
||||
.field("field2", "4")
|
||||
.endObject()
|
||||
.endArray()
|
||||
.endObject()),
|
||||
XContentType.JSON));
|
||||
|
||||
assertThat(doc.docs().size(), equalTo(3));
|
||||
if (version.onOrAfter(Version.V_6_5_0)) {
|
||||
assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
|
||||
assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("1"));
|
||||
assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("2"));
|
||||
assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("3"));
|
||||
assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("4"));
|
||||
assertThat(doc.docs().get(2).get("field"), equalTo("value"));
|
||||
} else {
|
||||
assertThat(doc.docs().get(0).get(TypeFieldMapper.NAME), equalTo(nested1Mapper.nestedTypePathAsString()));
|
||||
assertThat(doc.docs().get(0).get("nested1.field1"), equalTo("3"));
|
||||
assertThat(doc.docs().get(0).get("nested1.field2"), equalTo("4"));
|
||||
assertThat(doc.docs().get(1).get("nested1.field1"), equalTo("1"));
|
||||
assertThat(doc.docs().get(1).get("nested1.field2"), equalTo("2"));
|
||||
assertThat(doc.docs().get(2).get("field"), equalTo("value"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
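Context for the hunks above: this commit indexes nested (child) documents in the order they appear in the source, with the parent document still last, so the expected values in these assertions flip from the old reversed order to source order; indices created before 6.5.0 keep the legacy order, which is what testReorderParentBWC pins down. A minimal, self-contained sketch of that invariant in plain Java (illustrative names only, not the mapper code):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class NestedOrderSketch {
    // New indices keep source order for nested children; legacy indices stored them reversed.
    static List<String> indexedChildren(List<String> sourceOrder, boolean onOrAfter6_5_0) {
        List<String> docs = new ArrayList<>(sourceOrder);
        if (onOrAfter6_5_0 == false) {
            Collections.reverse(docs); // legacy behaviour exercised by the bwcVersion branch
        }
        return docs;
    }

    public static void main(String[] args) {
        List<String> field1Values = Arrays.asList("1", "3"); // values from testReorderParentBWC
        System.out.println(indexedChildren(field1Values, true));  // [1, 3] -> new expectation
        System.out.println(indexedChildren(field1Values, false)); // [3, 1] -> old expectation
    }
}
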
@@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.joda.time.DateTimeZone;

import java.time.ZoneOffset;
@@ -49,6 +50,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;

@TestLogging(value = "org.elasticsearch.indices.IndicesRequestCache:TRACE")
public class IndicesRequestCacheIT extends ESIntegTestCase {

// One of the primary purposes of the query cache is to cache aggs results
@@ -417,8 +419,8 @@ public class IndicesRequestCacheIT extends ESIntegTestCase {
.getRequestCache();
// Check the hit count and miss count together so if they are not
// correct we can see both values
assertEquals(Arrays.asList(expectedHits, expectedMisses),
Arrays.asList(requestCacheStats.getHitCount(), requestCacheStats.getMissCount()));
assertEquals(Arrays.asList(expectedHits, expectedMisses, 0L),
Arrays.asList(requestCacheStats.getHitCount(), requestCacheStats.getMissCount(), requestCacheStats.getEvictions()));
}

}

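The only functional change in IndicesRequestCacheIT is that the stats check now also asserts zero evictions, using the list-comparison idiom the in-code comment describes so a failure reports every counter at once. A standalone sketch of that idiom (hypothetical counter values):

import java.util.Arrays;
import java.util.List;

public class CacheStatsAssertSketch {
    public static void main(String[] args) {
        long hitCount = 2L, missCount = 1L, evictions = 0L; // stand-ins for the RequestCacheStats getters
        List<Long> expected = Arrays.asList(2L, 1L, 0L);
        List<Long> actual = Arrays.asList(hitCount, missCount, evictions);
        if (expected.equals(actual) == false) {
            // Unlike three separate assertions, this message shows all counters together.
            throw new AssertionError("expected " + expected + " but was " + actual);
        }
        System.out.println("cache stats match: " + actual);
    }
}
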
@@ -31,7 +31,6 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
@@ -39,6 +38,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.cache.request.ShardRequestCache;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
@@ -68,7 +68,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
// initial cache
TestEntity entity = new TestEntity(requestCacheStats, indexShard);
Loader loader = new Loader(reader, 0);
BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes);
BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value.streamInput().readString());
assertEquals(0, requestCacheStats.stats().getHitCount());
assertEquals(1, requestCacheStats.stats().getMissCount());
@@ -79,7 +79,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
// cache hit
entity = new TestEntity(requestCacheStats, indexShard);
loader = new Loader(reader, 0);
value = cache.getOrCompute(entity, loader, reader, termBytes);
value = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value.streamInput().readString());
assertEquals(1, requestCacheStats.stats().getHitCount());
assertEquals(1, requestCacheStats.stats().getMissCount());
@@ -126,7 +126,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
// initial cache
TestEntity entity = new TestEntity(requestCacheStats, indexShard);
Loader loader = new Loader(reader, 0);
BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes);
BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value.streamInput().readString());
assertEquals(0, requestCacheStats.stats().getHitCount());
assertEquals(1, requestCacheStats.stats().getMissCount());
@@ -140,7 +140,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
// cache the second
TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard);
loader = new Loader(secondReader, 0);
value = cache.getOrCompute(entity, loader, secondReader, termBytes);
value = cache.getOrCompute(entity, loader, secondReader, termBytes, () -> termQuery.toString());
assertEquals("bar", value.streamInput().readString());
assertEquals(0, requestCacheStats.stats().getHitCount());
assertEquals(2, requestCacheStats.stats().getMissCount());
@@ -152,7 +152,7 @@ public class IndicesRequestCacheTests extends ESTestCase {

secondEntity = new TestEntity(requestCacheStats, indexShard);
loader = new Loader(secondReader, 0);
value = cache.getOrCompute(secondEntity, loader, secondReader, termBytes);
value = cache.getOrCompute(secondEntity, loader, secondReader, termBytes, () -> termQuery.toString());
assertEquals("bar", value.streamInput().readString());
assertEquals(1, requestCacheStats.stats().getHitCount());
assertEquals(2, requestCacheStats.stats().getMissCount());
@@ -162,7 +162,7 @@ public class IndicesRequestCacheTests extends ESTestCase {

entity = new TestEntity(requestCacheStats, indexShard);
loader = new Loader(reader, 0);
value = cache.getOrCompute(entity, loader, reader, termBytes);
value = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value.streamInput().readString());
assertEquals(2, requestCacheStats.stats().getHitCount());
assertEquals(2, requestCacheStats.stats().getMissCount());
@@ -222,9 +222,9 @@ public class IndicesRequestCacheTests extends ESTestCase {
TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard);
Loader secondLoader = new Loader(secondReader, 0);

BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes);
BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value1.streamInput().readString());
BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes);
BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes, () -> termQuery.toString());
assertEquals("bar", value2.streamInput().readString());
size = requestCacheStats.stats().getMemorySize();
IOUtils.close(reader, secondReader, writer, dir, cache);
@@ -257,12 +257,12 @@ public class IndicesRequestCacheTests extends ESTestCase {
TestEntity thirddEntity = new TestEntity(requestCacheStats, indexShard);
Loader thirdLoader = new Loader(thirdReader, 0);

BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes);
BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value1.streamInput().readString());
BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes);
BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes, () -> termQuery.toString());
assertEquals("bar", value2.streamInput().readString());
logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes);
BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes, () -> termQuery.toString());
assertEquals("baz", value3.streamInput().readString());
assertEquals(2, cache.count());
assertEquals(1, requestCacheStats.stats().getEvictions());
@@ -298,12 +298,12 @@ public class IndicesRequestCacheTests extends ESTestCase {
TestEntity thirddEntity = new TestEntity(requestCacheStats, differentIdentity);
Loader thirdLoader = new Loader(thirdReader, 0);

BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes);
BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value1.streamInput().readString());
BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes);
BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes, () -> termQuery.toString());
assertEquals("bar", value2.streamInput().readString());
logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize());
BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes);
BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes, () -> termQuery.toString());
assertEquals("baz", value3.streamInput().readString());
assertEquals(3, cache.count());
final long hitCount = requestCacheStats.stats().getHitCount();
@@ -312,7 +312,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
cache.cleanCache();
assertEquals(1, cache.count());
// third has not been validated since it's a different identity
value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes);
value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes, () -> termQuery.toString());
assertEquals(hitCount + 1, requestCacheStats.stats().getHitCount());
assertEquals("baz", value3.streamInput().readString());

@@ -371,7 +371,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
// initial cache
TestEntity entity = new TestEntity(requestCacheStats, indexShard);
Loader loader = new Loader(reader, 0);
BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes);
BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value.streamInput().readString());
assertEquals(0, requestCacheStats.stats().getHitCount());
assertEquals(1, requestCacheStats.stats().getMissCount());
@@ -382,7 +382,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
// cache hit
entity = new TestEntity(requestCacheStats, indexShard);
loader = new Loader(reader, 0);
value = cache.getOrCompute(entity, loader, reader, termBytes);
value = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value.streamInput().readString());
assertEquals(1, requestCacheStats.stats().getHitCount());
assertEquals(1, requestCacheStats.stats().getMissCount());
@@ -396,7 +396,7 @@ public class IndicesRequestCacheTests extends ESTestCase {
entity = new TestEntity(requestCacheStats, indexShard);
loader = new Loader(reader, 0);
cache.invalidate(entity, reader, termBytes);
value = cache.getOrCompute(entity, loader, reader, termBytes);
value = cache.getOrCompute(entity, loader, reader, termBytes, () -> termQuery.toString());
assertEquals("foo", value.streamInput().readString());
assertEquals(1, requestCacheStats.stats().getHitCount());
assertEquals(2, requestCacheStats.stats().getMissCount());

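Every getOrCompute call in IndicesRequestCacheTests gains a trailing () -> termQuery.toString() argument, i.e. a Supplier&lt;String&gt;: the printable form of the cache key is only built if something asks for it, such as the TRACE logging switched on in IndicesRequestCacheIT above. A standalone sketch of that lazy-description pattern, with hypothetical names:

import java.util.function.Supplier;

public class LazyCacheKeySketch {
    static String getOrCompute(String key, Supplier<String> keyDescription) {
        boolean traceEnabled = false; // pretend TRACE logging is disabled
        if (traceEnabled) {
            System.out.println("loading for key " + keyDescription.get());
        }
        return "value-for-" + key;
    }

    public static void main(String[] args) {
        // The supplier below would fail loudly if evaluated; it never is, because
        // nothing on this code path needs the description.
        String value = getOrCompute("termBytes", () -> {
            throw new AssertionError("description built although nothing logged it");
        });
        System.out.println(value);
    }
}
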
@@ -54,6 +54,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
private static final Script MAP_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScript", Collections.emptyMap());
private static final Script COMBINE_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScript",
Collections.emptyMap());
private static final Script REDUCE_SCRIPT = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "reduceScript",
Collections.emptyMap());

private static final Script INIT_SCRIPT_SCORE = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScriptScore",
Collections.emptyMap());
@@ -61,6 +63,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
Collections.emptyMap());
private static final Script COMBINE_SCRIPT_SCORE = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptScore",
Collections.emptyMap());
private static final Script COMBINE_SCRIPT_NOOP = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptNoop",
Collections.emptyMap());

private static final Script INIT_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScriptParams",
Collections.singletonMap("initialValue", 24));
@@ -96,6 +100,14 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
Map<String, Object> state = (Map<String, Object>) params.get("state");
return ((List<Integer>) state.get("collector")).stream().mapToInt(Integer::intValue).sum();
});
SCRIPTS.put("combineScriptNoop", params -> {
Map<String, Object> state = (Map<String, Object>) params.get("state");
return state;
});
SCRIPTS.put("reduceScript", params -> {
Map<String, Object> state = (Map<String, Object>) params.get("state");
return state;
});

SCRIPTS.put("initScriptScore", params -> {
Map<String, Object> state = (Map<String, Object>) params.get("state");
@@ -160,7 +172,7 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.mapScript(MAP_SCRIPT); // map script is mandatory, even if its not used in this case
aggregationBuilder.mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT_NOOP).reduceScript(REDUCE_SCRIPT);
ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);
assertEquals(AGG_NAME, scriptedMetric.getName());
assertNotNull(scriptedMetric.aggregation());
@@ -169,9 +181,6 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
}

/**
* without combine script, the "states" map should contain a list of the size of the number of documents matched
*/
public void testScriptedMetricWithoutCombine() throws IOException {
try (Directory directory = newDirectory()) {
int numDocs = randomInt(100);
@@ -182,15 +191,28 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT);
ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);
assertEquals(AGG_NAME, scriptedMetric.getName());
assertNotNull(scriptedMetric.aggregation());
@SuppressWarnings("unchecked")
Map<String, Object> agg = (Map<String, Object>) scriptedMetric.aggregation();
@SuppressWarnings("unchecked")
List<Integer> list = (List<Integer>) agg.get("collector");
assertEquals(numDocs, list.size());
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).reduceScript(REDUCE_SCRIPT);
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder));
assertEquals(exception.getMessage(), "[combineScript] must not be null: [scriptedMetric]");
}
}
}

public void testScriptedMetricWithoutReduce() throws IOException {
try (Directory directory = newDirectory()) {
int numDocs = randomInt(100);
try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) {
for (int i = 0; i < numDocs; i++) {
indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i)));
}
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT);
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
() -> search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder));
assertEquals(exception.getMessage(), "[reduceScript] must not be null: [scriptedMetric]");
}
}
}
@@ -208,7 +230,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT);
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT)
.combineScript(COMBINE_SCRIPT).reduceScript(REDUCE_SCRIPT);
ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);
assertEquals(AGG_NAME, scriptedMetric.getName());
assertNotNull(scriptedMetric.aggregation());
@@ -230,7 +253,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT_SCORE).mapScript(MAP_SCRIPT_SCORE).combineScript(COMBINE_SCRIPT_SCORE);
aggregationBuilder.initScript(INIT_SCRIPT_SCORE).mapScript(MAP_SCRIPT_SCORE)
.combineScript(COMBINE_SCRIPT_SCORE).reduceScript(REDUCE_SCRIPT);
ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);
assertEquals(AGG_NAME, scriptedMetric.getName());
assertNotNull(scriptedMetric.aggregation());
@@ -250,7 +274,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {

try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS).combineScript(COMBINE_SCRIPT_PARAMS);
aggregationBuilder.initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS)
.combineScript(COMBINE_SCRIPT_PARAMS).reduceScript(REDUCE_SCRIPT);
ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder);

// The result value depends on the script params.
@@ -270,8 +295,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
Map<String, Object> aggParams = Collections.singletonMap(CONFLICTING_PARAM_NAME, "blah");
aggregationBuilder.params(aggParams).initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS).
combineScript(COMBINE_SCRIPT_PARAMS);
aggregationBuilder.params(aggParams).initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS)
.combineScript(COMBINE_SCRIPT_PARAMS).reduceScript(REDUCE_SCRIPT);

IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () ->
search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder)
@@ -289,7 +314,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT_SELF_REF).mapScript(MAP_SCRIPT);
aggregationBuilder.initScript(INIT_SCRIPT_SELF_REF).mapScript(MAP_SCRIPT)
.combineScript(COMBINE_SCRIPT_PARAMS).reduceScript(REDUCE_SCRIPT);

IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () ->
search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder)
@@ -309,7 +335,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT_SELF_REF);
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT_SELF_REF)
.combineScript(COMBINE_SCRIPT_PARAMS).reduceScript(REDUCE_SCRIPT);

IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () ->
search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder)
@@ -326,7 +353,8 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase {
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME);
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT).combineScript(COMBINE_SCRIPT_SELF_REF);
aggregationBuilder.initScript(INIT_SCRIPT).mapScript(MAP_SCRIPT)
.combineScript(COMBINE_SCRIPT_SELF_REF).reduceScript(REDUCE_SCRIPT);

IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () ->
search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder)

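The thread running through these hunks: combine and reduce scripts stop being optional, so every builder chain now ends in .combineScript(...).reduceScript(...), and the two tests that leave one out expect an IllegalArgumentException carrying the messages asserted above. A minimal sketch of that kind of builder validation (illustrative only, not the real ScriptedMetricAggregationBuilder internals):

public class MandatoryScriptsSketch {
    private String mapScript, combineScript, reduceScript;

    MandatoryScriptsSketch mapScript(String s) { this.mapScript = s; return this; }
    MandatoryScriptsSketch combineScript(String s) { this.combineScript = s; return this; }
    MandatoryScriptsSketch reduceScript(String s) { this.reduceScript = s; return this; }

    void validate() {
        // Mirrors the messages asserted in testScriptedMetricWithoutCombine/-Reduce.
        if (combineScript == null) {
            throw new IllegalArgumentException("[combineScript] must not be null: [scriptedMetric]");
        }
        if (reduceScript == null) {
            throw new IllegalArgumentException("[reduceScript] must not be null: [scriptedMetric]");
        }
    }

    public static void main(String[] args) {
        try {
            new MandatoryScriptsSketch().mapScript("map").reduceScript("reduce").validate();
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // [combineScript] must not be null: [scriptedMetric]
        }
    }
}
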
@@ -153,6 +153,14 @@ public class ScriptedMetricIT extends ESIntegTestCase {
return newAggregation;
});

scripts.put("no-op aggregation", vars -> {
return (Map<String, Object>) vars.get("state");
});

scripts.put("no-op list aggregation", vars -> {
return (List<List<?>>) vars.get("states");
});

// Equivalent to:
//
// newaggregation = [];
@@ -188,6 +196,11 @@ public class ScriptedMetricIT extends ESIntegTestCase {
Integer sum = 0;

List<Map<String, Object>> states = (List<Map<String, Object>>) vars.get("states");

if(states == null) {
return newAggregation;
}

for (Map<String, Object> state : states) {
List<?> list = (List<?>) state.get("list");
if (list != null) {
@@ -328,10 +341,14 @@ public class ScriptedMetricIT extends ESIntegTestCase {

public void testMap() {
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"no-op list aggregation", Collections.emptyMap());

SearchResponse response = client().prepareSearch("idx")
.setQuery(matchAllQuery())
.addAggregation(scriptedMetric("scripted").mapScript(mapScript))
.addAggregation(scriptedMetric("scripted").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript))
.get();
assertSearchResponse(response);
assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
@@ -369,10 +386,18 @@ public class ScriptedMetricIT extends ESIntegTestCase {
Map<String, Object> aggregationParams = Collections.singletonMap("param2", 1);

Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state[param1] = param2", scriptParams);
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"no-op list aggregation", Collections.emptyMap());

SearchResponse response = client().prepareSearch("idx")
.setQuery(matchAllQuery())
.addAggregation(scriptedMetric("scripted").params(aggregationParams).mapScript(mapScript))
.addAggregation(scriptedMetric("scripted")
.params(aggregationParams)
.mapScript(mapScript)
.combineScript(combineScript)
.reduceScript(reduceScript))
.get();
assertSearchResponse(response);
assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
@@ -423,7 +448,11 @@ public class ScriptedMetricIT extends ESIntegTestCase {
.initScript(
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap()))
.mapScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"state.list.add(vars.multiplier)", Collections.emptyMap())))
"state.list.add(vars.multiplier)", Collections.emptyMap()))
.combineScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"no-op aggregation", Collections.emptyMap()))
.reduceScript(new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"no-op list aggregation", Collections.emptyMap())))
.get();
assertSearchResponse(response);
assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
@@ -466,6 +495,8 @@ public class ScriptedMetricIT extends ESIntegTestCase {
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"no-op list aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@@ -474,7 +505,8 @@ public class ScriptedMetricIT extends ESIntegTestCase {
scriptedMetric("scripted")
.params(params)
.mapScript(mapScript)
.combineScript(combineScript))
.combineScript(combineScript)
.reduceScript(reduceScript))
.execute().actionGet();
assertSearchResponse(response);
assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
@@ -519,6 +551,8 @@ public class ScriptedMetricIT extends ESIntegTestCase {
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "sum state values as a new aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"no-op list aggregation", Collections.emptyMap());

SearchResponse response = client()
.prepareSearch("idx")
@@ -528,7 +562,8 @@ public class ScriptedMetricIT extends ESIntegTestCase {
.params(params)
.initScript(initScript)
.mapScript(mapScript)
.combineScript(combineScript))
.combineScript(combineScript)
.reduceScript(reduceScript))
.get();
assertSearchResponse(response);
assertThat(response.getHits().getTotalHits(), equalTo(numDocs));
@@ -713,6 +748,8 @@ public class ScriptedMetricIT extends ESIntegTestCase {
Script initScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "vars.multiplier = 3", Collections.emptyMap());
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states' state.list values as a new aggregation", Collections.emptyMap());

@@ -724,6 +761,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {
.params(params)
.initScript(initScript)
.mapScript(mapScript)
.combineScript(combineScript)
.reduceScript(reduceScript))
.get();
assertSearchResponse(response);
@@ -752,6 +790,8 @@ public class ScriptedMetricIT extends ESIntegTestCase {

Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(vars.multiplier)",
Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"sum all states' state.list values as a new aggregation", Collections.emptyMap());

@@ -762,6 +802,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {
scriptedMetric("scripted")
.params(params)
.mapScript(mapScript)
.combineScript(combineScript)
.reduceScript(reduceScript))
.get();
assertSearchResponse(response);
@@ -980,6 +1021,11 @@ public class ScriptedMetricIT extends ESIntegTestCase {
*/
public void testDontCacheScripts() throws Exception {
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state['count'] = 1", Collections.emptyMap());
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"no-op list aggregation", Collections.emptyMap());

assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=long")
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
.get());
@@ -994,7 +1040,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {

// Test that a request using a script does not get cached
SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0)
.addAggregation(scriptedMetric("foo").mapScript(mapScript)).get();
.addAggregation(scriptedMetric("foo").mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)).get();
assertSearchResponse(r);

assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache()
@@ -1006,10 +1052,17 @@ public class ScriptedMetricIT extends ESIntegTestCase {
public void testConflictingAggAndScriptParams() {
Map<String, Object> params = Collections.singletonMap("param1", "12");
Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "state.list.add(1)", params);
Script combineScript =
new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap());
Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
"no-op list aggregation", Collections.emptyMap());

SearchRequestBuilder builder = client().prepareSearch("idx")
.setQuery(matchAllQuery())
.addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript));
.addAggregation(scriptedMetric("scripted")
.params(params).mapScript(mapScript)
.combineScript(combineScript)
.reduceScript(reduceScript));

SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get);
assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters"));

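The integration tests satisfy the same requirement by wiring in the two no-op scripts registered at the top of the file wherever a test only exercises the map phase. Functionally those scripts are just identity functions over the shard state, roughly:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

public class NoOpScriptsSketch {
    public static void main(String[] args) {
        // "no-op aggregation": combine hands the per-shard state back unchanged.
        Function<Map<String, Object>, Object> noOpCombine = state -> state;
        // "no-op list aggregation": reduce hands the list of shard states back unchanged.
        Function<List<Map<String, Object>>, Object> noOpReduce = states -> states;

        Map<String, Object> shardState = Collections.singletonMap("count", 1);
        System.out.println(noOpCombine.apply(shardState));                           // {count=1}
        System.out.println(noOpReduce.apply(Collections.singletonList(shardState))); // [{count=1}]
    }
}
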
@@ -826,16 +826,16 @@ public class TopHitsIT extends ESIntegTestCase {
assertThat(topReviewers.getHits().getAt(2).getId(), equalTo("1"));
assertThat(extractValue("name", topReviewers.getHits().getAt(2).getSourceAsMap()), equalTo("user c"));
assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getField().string(), equalTo("comments"));
assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(0));
assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getOffset(), equalTo(1));
assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getOffset(), equalTo(2));
assertThat(topReviewers.getHits().getAt(2).getNestedIdentity().getChild().getOffset(), equalTo(0));

assertThat(topReviewers.getHits().getAt(3).getId(), equalTo("1"));
assertThat(extractValue("name", topReviewers.getHits().getAt(3).getSourceAsMap()), equalTo("user c"));
assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getField().string(), equalTo("comments"));
assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(1));
assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getOffset(), equalTo(0));
assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getField().string(), equalTo("reviewers"));
assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getOffset(), equalTo(0));
assertThat(topReviewers.getHits().getAt(3).getNestedIdentity().getChild().getOffset(), equalTo(2));

assertThat(topReviewers.getHits().getAt(4).getId(), equalTo("1"));
assertThat(extractValue("name", topReviewers.getHits().getAt(4).getSourceAsMap()), equalTo("user d"));

@@ -208,9 +208,9 @@ public class InnerHitsIT extends ESIntegTestCase {
int size = randomIntBetween(0, numDocs);
BoolQueryBuilder boolQuery = new BoolQueryBuilder();
boolQuery.should(nestedQuery("field1", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder("a").setSize(size)
.addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC))));
.addSort(new FieldSortBuilder("_doc").order(SortOrder.ASC))));
boolQuery.should(nestedQuery("field2", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder("b")
.addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC)).setSize(size)));
.addSort(new FieldSortBuilder("_doc").order(SortOrder.ASC)).setSize(size)));
SearchResponse searchResponse = client().prepareSearch("idx")
.setQuery(boolQuery)
.setSize(numDocs)

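The TopHitsIT offset swaps and the InnerHitsIT DESC-to-ASC flips are two faces of the same storage change: with nested children now laid out in Lucene in source order, iterating inner hits by _doc ascending visits them in the order they were written, which previously required a descending sort. Roughly (illustrative only):

import java.util.Arrays;
import java.util.List;

public class InnerHitsOrderSketch {
    public static void main(String[] args) {
        // Child documents of one parent; Lucene doc ids grow left to right,
        // and after this change they follow the _source array order.
        List<String> childrenByDocId = Arrays.asList("comment 0", "comment 1", "comment 2");
        // Sorting inner hits by _doc ascending therefore yields source order directly.
        System.out.println(childrenByDocId.get(0)); // comment 0
    }
}
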
@@ -360,6 +360,7 @@ public class ShardChangesIT extends ESIntegTestCase {
assertMaxSeqNoOfUpdatesIsTransferred(resolveIndex("index1"), resolveIndex("index2"), numberOfShards);
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33337")
public void testFollowIndexAndCloseNode() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(3);
String leaderIndexSettings = getIndexSettings(3, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));

@@ -862,6 +862,9 @@ public class DocumentLevelSecurityTests extends SecurityIntegTestCase {
.startObject()
.field("field2", "value2")
.endObject()
.startObject()
.array("field2", "value2", "value3")
.endObject()
.endArray()
.endObject())
.get();
@@ -889,6 +892,9 @@ public class DocumentLevelSecurityTests extends SecurityIntegTestCase {
assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(0).getSourceAsString(),
equalTo("{\"field2\":\"value2\"}"));
assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(1).getNestedIdentity().getOffset(), equalTo(1));
assertThat(response.getHits().getAt(0).getInnerHits().get("nested_field").getAt(1).getSourceAsString(),
equalTo("{\"field2\":[\"value2\",\"value3\"]}"));
}

public void testSuggesters() throws Exception {