Merge branch 'master' into feature-suggest-refactoring
Conflicts: core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
commit 421ed1228b
@@ -112,6 +112,9 @@ public class PluginBuildPlugin extends BuildPlugin {
                include 'config/**'
                include 'bin/**'
            }
            if (project.path.startsWith(':modules:') == false) {
                into('elasticsearch')
            }
        }
        project.assemble.dependsOn(bundle)
@@ -37,6 +37,8 @@
       hard to distinguish from the digit 1 (one). -->
  <module name="UpperEll"/>

  <module name="EqualsHashCode" />

  <!-- We don't use Java's builtin serialization and we suppress all warnings
    about it. The flip side of that coin is that we shouldn't _try_ to use
    it. We can't outright ban it with ForbiddenApis because it complains about
@@ -673,7 +673,6 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]PipelineExecutionService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]PipelineStore.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]CompoundProcessor.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]ConfigurationUtils.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]IngestDocument.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]core[/\\]Pipeline.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]processor[/\\]ConvertProcessor.java" checks="LineLength" />
@@ -128,4 +128,6 @@ java.util.Collections#EMPTY_SET
java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
@defaultMessage Use org.elasticsearch.common.Randomness#get for reproducible sources of randomness
java.util.Random#<init>()
java.util.concurrent.ThreadLocalRandom
java.util.concurrent.ThreadLocalRandom

java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests
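The new @defaultMessage block above steers callers toward org.elasticsearch.common.Randomness, which the rule itself names. A minimal sketch of compliant code (the class name here is invented for illustration):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;
    import org.elasticsearch.common.Randomness;

    public class ShuffleExample {
        public static void main(String[] args) {
            List<Integer> list = new ArrayList<>(Arrays.asList(1, 2, 3));
            // Reproducible source of randomness, per the forbidden-API rule above.
            Random random = Randomness.get();
            // The shuffle(List, Random) overload is allowed; bare shuffle(List) is banned.
            Collections.shuffle(list, random);
            System.out.println(list);
        }
    }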
@@ -1,13 +1,14 @@
# Elasticsearch plugin descriptor file
# This file must exist as 'plugin-descriptor.properties' at
# the root directory of all plugins.
# This file must exist as 'plugin-descriptor.properties' in a folder named `elasticsearch`
# inside all plugins.
#
### example plugin for "foo"
#
# foo.zip <-- zip file for the plugin, with this structure:
#   <arbitrary name1>.jar <-- classes, resources, dependencies
#   <arbitrary nameN>.jar <-- any number of jars
#   plugin-descriptor.properties <-- example contents below:
#|____elasticsearch/
#|    |____ <arbitrary name1>.jar <-- classes, resources, dependencies
#|    |____ <arbitrary nameN>.jar <-- any number of jars
#|    |____ plugin-descriptor.properties <-- example contents below:
#
# classname=foo.bar.BazPlugin
# description=My cool plugin
@@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;

@@ -268,6 +270,7 @@ public class ActionModule extends AbstractModule {
        registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
        registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
        registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
        registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);

        registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
        registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
@@ -0,0 +1,46 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.action.Action;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Action for cancelling running tasks
 */
public class CancelTasksAction extends Action<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> {

    public static final CancelTasksAction INSTANCE = new CancelTasksAction();
    public static final String NAME = "cluster:admin/tasks/cancel";

    private CancelTasksAction() {
        super(NAME);
    }

    @Override
    public CancelTasksResponse newResponse() {
        return new CancelTasksResponse();
    }

    @Override
    public CancelTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new CancelTasksRequestBuilder(client, this);
    }
}
@@ -0,0 +1,73 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;

import java.io.IOException;

/**
 * A request to cancel tasks
 */
public class CancelTasksRequest extends BaseTasksRequest<CancelTasksRequest> {

    public static final String DEFAULT_REASON = "by user request";

    private String reason = DEFAULT_REASON;

    /**
     * Cancel tasks on the specified nodes. If none are passed, all cancellable tasks on
     * all nodes will be cancelled.
     */
    public CancelTasksRequest(String... nodesIds) {
        super(nodesIds);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        reason = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(reason);
    }

    @Override
    public boolean match(Task task) {
        return super.match(task) && task instanceof CancellableTask;
    }

    public CancelTasksRequest reason(String reason) {
        this.reason = reason;
        return this;
    }

    public String reason() {
        return reason;
    }
}
@@ -0,0 +1,34 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.action.support.tasks.TasksRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Builder for the request to cancel tasks running on the specified nodes
 */
public class CancelTasksRequestBuilder extends TasksRequestBuilder<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> {

    public CancelTasksRequestBuilder(ElasticsearchClient client, CancelTasksAction action) {
        super(client, action, new CancelTasksRequest());
    }

}
@@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;

import java.util.List;

/**
 * Returns the list of tasks that were cancelled
 */
public class CancelTasksResponse extends ListTasksResponse {

    public CancelTasksResponse() {
    }

    public CancelTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures,
                               List<? extends FailedNodeException> nodeFailures) {
        super(tasks, taskFailures, nodeFailures);
    }

}
@@ -0,0 +1,285 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.node.tasks.cancel;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

/**
 * Transport action that can be used to cancel currently running cancellable tasks.
 * <p>
 * For a task to be cancellable it has to return an instance of
 * {@link CancellableTask} from {@link TransportRequest#createTask(long, String, String)}
 */
public class TransportCancelTasksAction extends TransportTasksAction<CancellableTask, CancelTasksRequest, CancelTasksResponse, TaskInfo> {

    public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban";

    @Inject
    public TransportCancelTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService,
                                      TransportService transportService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, CancelTasksAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters,
            indexNameExpressionResolver, CancelTasksRequest::new, CancelTasksResponse::new, ThreadPool.Names.MANAGEMENT);
        transportService.registerRequestHandler(BAN_PARENT_ACTION_NAME, BanParentTaskRequest::new, ThreadPool.Names.SAME,
            new BanParentRequestHandler());
    }

    @Override
    protected CancelTasksResponse newResponse(CancelTasksRequest request, List<TaskInfo> tasks,
                                              List<TaskOperationFailure> taskOperationFailures,
                                              List<FailedNodeException> failedNodeExceptions) {
        return new CancelTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
    }

    @Override
    protected TaskInfo readTaskResponse(StreamInput in) throws IOException {
        return new TaskInfo(in);
    }

    protected void processTasks(CancelTasksRequest request, Consumer<CancellableTask> operation) {
        if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
            // we are only checking one task, we can optimize it
            CancellableTask task = taskManager.getCancellableTask(request.taskId());
            if (task != null) {
                if (request.match(task)) {
                    operation.accept(task);
                } else {
                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support this operation");
                }
            } else {
                if (taskManager.getTask(request.taskId()) != null) {
                    // The task exists, but doesn't support cancellation
                    throw new IllegalArgumentException("task [" + request.taskId() + "] doesn't support cancellation");
                } else {
                    throw new ResourceNotFoundException("task [{}] is not found", request.taskId());
                }
            }
        } else {
            for (CancellableTask task : taskManager.getCancellableTasks().values()) {
                if (request.match(task)) {
                    operation.accept(task);
                }
            }
        }
    }

    @Override
    protected synchronized TaskInfo taskOperation(CancelTasksRequest request, CancellableTask cancellableTask) {
        final BanLock banLock = new BanLock(nodes -> removeBanOnNodes(cancellableTask, nodes));
        Set<String> childNodes = taskManager.cancel(cancellableTask, request.reason(), banLock::onTaskFinished);
        if (childNodes != null) {
            if (childNodes.isEmpty()) {
                logger.trace("cancelling task {} with no children", cancellableTask.getId());
                return cancellableTask.taskInfo(clusterService.localNode(), false);
            } else {
                logger.trace("cancelling task {} with children on nodes [{}]", cancellableTask.getId(), childNodes);
                setBanOnNodes(request.reason(), cancellableTask, childNodes, banLock);
                return cancellableTask.taskInfo(clusterService.localNode(), false);
            }
        } else {
            logger.trace("task {} is already cancelled", cancellableTask.getId());
            throw new IllegalStateException("task with id " + cancellableTask.getId() + " is already cancelled");
        }
    }

    @Override
    protected boolean accumulateExceptions() {
        return true;
    }

    private void setBanOnNodes(String reason, CancellableTask task, Set<String> nodes, BanLock banLock) {
        sendSetBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId(), reason), banLock);
    }

    private void removeBanOnNodes(CancellableTask task, Set<String> nodes) {
        sendRemoveBanRequest(nodes, new BanParentTaskRequest(clusterService.localNode().getId(), task.getId()));
    }

    private void sendSetBanRequest(Set<String> nodes, BanParentTaskRequest request, BanLock banLock) {
        ClusterState clusterState = clusterService.state();
        for (String node : nodes) {
            DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
            if (discoveryNode != null) {
                // Check if the node is still in the cluster
                logger.debug("Sending ban for tasks with the parent [{}:{}] to the node [{}], ban [{}]",
                    request.parentNodeId, request.parentTaskId, node, request.ban);
                transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request,
                    new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
                        @Override
                        public void handleResponse(TransportResponse.Empty response) {
                            banLock.onBanSet();
                        }

                        @Override
                        public void handleException(TransportException exp) {
                            banLock.onBanSet();
                        }
                    });
            } else {
                banLock.onBanSet();
                logger.debug("Cannot send ban for tasks with the parent [{}:{}] to the node [{}] - the node is no longer in the cluster",
                    request.parentNodeId, request.parentTaskId, node);
            }
        }
    }

    private void sendRemoveBanRequest(Set<String> nodes, BanParentTaskRequest request) {
        ClusterState clusterState = clusterService.state();
        for (String node : nodes) {
            DiscoveryNode discoveryNode = clusterState.getNodes().get(node);
            if (discoveryNode != null) {
                // Check if the node is still in the cluster
                logger.debug("Sending remove ban for tasks with the parent [{}:{}] to the node [{}]", request.parentNodeId,
                    request.parentTaskId, node);
                transportService.sendRequest(discoveryNode, BAN_PARENT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
            } else {
                logger.debug("Cannot send remove ban request for tasks with the parent [{}:{}] to the node [{}] - the node is no " +
                    "longer in the cluster", request.parentNodeId, request.parentTaskId, node);
            }
        }
    }

    private static class BanLock {
        // Counts outstanding ban acknowledgements; finish runs once the count returns to zero.
        private final Consumer<Set<String>> finish;
        private final AtomicInteger counter;
        private final AtomicReference<Set<String>> nodes = new AtomicReference<>();

        public BanLock(Consumer<Set<String>> finish) {
            counter = new AtomicInteger(0);
            this.finish = finish;
        }

        public void onBanSet() {
            if (counter.decrementAndGet() == 0) {
                finish();
            }
        }

        public void onTaskFinished(Set<String> nodes) {
            this.nodes.set(nodes);
            if (counter.addAndGet(nodes.size()) == 0) {
                finish();
            }
        }

        public void finish() {
            finish.accept(nodes.get());
        }

    }

    private static class BanParentTaskRequest extends TransportRequest {

        private String parentNodeId;

        private long parentTaskId;

        private boolean ban;

        private String reason;

        BanParentTaskRequest(String parentNodeId, long parentTaskId, String reason) {
            this.parentNodeId = parentNodeId;
            this.parentTaskId = parentTaskId;
            this.ban = true;
            this.reason = reason;
        }

        BanParentTaskRequest(String parentNodeId, long parentTaskId) {
            this.parentNodeId = parentNodeId;
            this.parentTaskId = parentTaskId;
            this.ban = false;
        }

        public BanParentTaskRequest() {
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            parentNodeId = in.readString();
            parentTaskId = in.readLong();
            ban = in.readBoolean();
            if (ban) {
                reason = in.readString();
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(parentNodeId);
            out.writeLong(parentTaskId);
            out.writeBoolean(ban);
            if (ban) {
                out.writeString(reason);
            }
        }
    }

    class BanParentRequestHandler implements TransportRequestHandler<BanParentTaskRequest> {
        @Override
        public void messageReceived(final BanParentTaskRequest request, final TransportChannel channel) throws Exception {
            if (request.ban) {
                logger.debug("Received ban for the parent [{}:{}] on the node [{}], reason: [{}]", request.parentNodeId,
                    request.parentTaskId, clusterService.localNode().getId(), request.reason);
                taskManager.setBan(request.parentNodeId, request.parentTaskId, request.reason);
            } else {
                logger.debug("Removing ban for the parent [{}:{}] on the node [{}]", request.parentNodeId, request.parentTaskId,
                    clusterService.localNode().getId());
                taskManager.removeBan(request.parentNodeId, request.parentTaskId);
            }
            channel.sendResponse(TransportResponse.Empty.INSTANCE);
        }
    }

}
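BanLock above is a count-to-zero latch: onBanSet() may run, and drive the counter negative, before onTaskFinished() learns how many acknowledgements to expect, and finish() fires exactly once when the two sides balance. A minimal standalone sketch of the same pattern (all names invented for illustration):

    import java.util.concurrent.atomic.AtomicInteger;

    public class CountToZeroLatch {
        private final AtomicInteger counter = new AtomicInteger(0);
        private final Runnable onZero;

        public CountToZeroLatch(Runnable onZero) {
            this.onZero = onZero;
        }

        // Called once the number of outstanding acknowledgements is known;
        // earlier decrements may already have balanced it out.
        public void expect(int outstanding) {
            if (counter.addAndGet(outstanding) == 0) {
                onZero.run();
            }
        }

        // Called for each acknowledgement; the counter can legitimately go
        // negative while acknowledgements race ahead of expect().
        public void acknowledge() {
            if (counter.decrementAndGet() == 0) {
                onZero.run();
            }
        }
    }

Whichever call brings the counter back to zero triggers the callback, so the ordering of ban acknowledgements and task completion does not matter.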
@@ -105,7 +105,9 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
        if (getTaskFailures() != null && getTaskFailures().size() > 0) {
            builder.startArray("task_failures");
            for (TaskOperationFailure ex : getTaskFailures()) {
                builder.startObject();
                builder.value(ex);
                builder.endObject();
            }
            builder.endArray();
        }

@@ -113,7 +115,9 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
        if (getNodeFailures() != null && getNodeFailures().size() > 0) {
            builder.startArray("node_failures");
            for (FailedNodeException ex : getNodeFailures()) {
                builder.value(ex);
                builder.startObject();
                ex.toXContent(builder, params);
                builder.endObject();
            }
            builder.endArray();
        }
@@ -30,17 +30,17 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.Collection;
import java.util.List;

/**
 *
 */
public class TransportListTasksAction extends TransportTasksAction<ListTasksRequest, ListTasksResponse, TaskInfo> {
public class TransportListTasksAction extends TransportTasksAction<Task, ListTasksRequest, ListTasksResponse, TaskInfo> {

    @Inject
    public TransportListTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
@@ -286,24 +286,25 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> {
            return addValidationError("Must specify at least one alias action", validationException);
        }
        for (AliasActions aliasAction : allAliasActions) {
            if (aliasAction.aliases.length == 0) {
            if (CollectionUtils.isEmpty(aliasAction.aliases)) {
                validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                    + "]: aliases may not be empty", validationException);
            }
            for (String alias : aliasAction.aliases) {
                if (!Strings.hasText(alias)) {
                    validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                        + "]: [alias] may not be empty string", validationException);
                    + "]: Property [alias/aliases] is either missing or null", validationException);
            } else {
                for (String alias : aliasAction.aliases) {
                    if (!Strings.hasText(alias)) {
                        validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                            + "]: [alias/aliases] may not be empty string", validationException);
                    }
                }
            }
            if (CollectionUtils.isEmpty(aliasAction.indices)) {
                validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                    + "]: Property [index] was either missing or null", validationException);
                    + "]: Property [index/indices] is either missing or null", validationException);
            } else {
                for (String index : aliasAction.indices) {
                    if (!Strings.hasText(index)) {
                        validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                            + "]: [index] may not be empty string", validationException);
                            + "]: [index/indices] may not be empty string", validationException);
                    }
                }
            }
@@ -44,20 +44,6 @@ public abstract class ChildTaskActionRequest<Request extends ActionRequest<Request>> {
        this.parentTaskId = parentTaskId;
    }

    /**
     * The node that owns the parent task.
     */
    public String getParentTaskNode() {
        return parentTaskNode;
    }

    /**
     * The task id of the parent task on the parent node.
     */
    public long getParentTaskId() {
        return parentTaskId;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

@@ -73,8 +59,12 @@ public abstract class ChildTaskActionRequest<Request extends ActionRequest<Request>> {
    }

    @Override
    public Task createTask(long id, String type, String action) {
        return new Task(id, type, action, this::getDescription, parentTaskNode, parentTaskId);
    public final Task createTask(long id, String type, String action) {
        return createTask(id, type, action, parentTaskNode, parentTaskId);
    }

    public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
        return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
    }

}
@@ -58,7 +58,11 @@ public class ChildTaskRequest extends TransportRequest {
    }

    @Override
    public Task createTask(long id, String type, String action) {
        return new Task(id, type, action, this::getDescription, parentTaskNode, parentTaskId);
    public final Task createTask(long id, String type, String action) {
        return createTask(id, type, action, parentTaskNode, parentTaskId);
    }

    public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
        return new Task(id, type, action, getDescription(), parentTaskNode, parentTaskId);
    }
}
@@ -87,6 +87,10 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<Request>> {

    protected abstract ShardResponse shardOperation(ShardRequest request);

    protected ShardResponse shardOperation(ShardRequest request, Task task) {
        return shardOperation(request);
    }

    /**
     * Determines the shards this operation will be executed on. The operation is executed once per shard iterator, typically
     * on the first shard in it. If the operation fails, it will be retried on the next shard in the iterator.

@@ -172,6 +176,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<Request>> {
                // no node connected, act as failure
                onOperation(shard, shardIt, shardIndex, new NoShardAvailableActionException(shardIt.shardId()));
            } else {
                taskManager.registerChildTask(task, node.getId());
                transportService.sendRequest(node, transportShardAction, shardRequest, new BaseTransportResponseHandler<ShardResponse>() {
                    @Override
                    public ShardResponse newInstance() {

@@ -278,8 +283,13 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<Request>> {
    class ShardTransportHandler implements TransportRequestHandler<ShardRequest> {

        @Override
        public void messageReceived(final ShardRequest request, final TransportChannel channel) throws Exception {
        public void messageReceived(ShardRequest request, TransportChannel channel, Task task) throws Exception {
            channel.sendResponse(shardOperation(request, task));
        }

        @Override
        public final void messageReceived(final ShardRequest request, final TransportChannel channel) throws Exception {
            throw new UnsupportedOperationException("the task parameter is required");
        }
    }
}
|
@ -301,6 +301,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
|||
NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards);
|
||||
if (task != null) {
|
||||
nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
|
||||
taskManager.registerChildTask(task, node.getId());
|
||||
}
|
||||
transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new BaseTransportResponseHandler<NodeResponse>() {
|
||||
@Override
|
||||
|
|
|
@@ -159,6 +159,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest<Request>> {
                    }
                }
            };
            taskManager.registerChildTask(task, nodes.getLocalNodeId());
            threadPool.executor(executor).execute(new ActionRunnable(delegate) {
                @Override
                protected void doRun() throws Exception {

@@ -171,6 +172,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest<Request>> {
                    logger.debug("no known master node, scheduling a retry");
                    retry(null, MasterNodeChangePredicate.INSTANCE);
                } else {
                    taskManager.registerChildTask(task, nodes.masterNode().getId());
                    transportService.sendRequest(nodes.masterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener) {
                        @Override
                        public Response newInstance() {
@@ -95,6 +95,10 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest<NodesRequest>> {

    protected abstract NodeResponse nodeOperation(NodeRequest request);

    protected NodeResponse nodeOperation(NodeRequest request, Task task) {
        return nodeOperation(request);
    }

    protected abstract boolean accumulateExceptions();

    protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {

@@ -163,6 +167,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest<NodesRequest>> {
                    ChildTaskRequest nodeRequest = newNodeRequest(nodeId, request);
                    if (task != null) {
                        nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
                        taskManager.registerChildTask(task, node.getId());
                    }

                    transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeResponse>() {

@@ -228,8 +233,14 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest<NodesRequest>> {
    class NodeTransportHandler implements TransportRequestHandler<NodeRequest> {

        @Override
        public void messageReceived(final NodeRequest request, final TransportChannel channel) throws Exception {
        public void messageReceived(NodeRequest request, TransportChannel channel, Task task) throws Exception {
            channel.sendResponse(nodeOperation(request, task));
        }

        @Override
        public void messageReceived(NodeRequest request, TransportChannel channel) throws Exception {
            channel.sendResponse(nodeOperation(request));
        }

    }
}
@@ -196,8 +196,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> {
    }

    @Override
    public Task createTask(long id, String type, String action) {
        return new ReplicationTask(id, type, action, this::getDescription, getParentTaskNode(), getParentTaskId());
    public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
        return new ReplicationTask(id, type, action, getDescription(), parentTaskNode, parentTaskId);
    }

    /**

@@ -218,4 +218,9 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Request>> {
            return index;
        }
    }

    @Override
    public String getDescription() {
        return toString();
    }
}
@@ -35,7 +35,7 @@ import static java.util.Objects.requireNonNull;
public class ReplicationTask extends Task {
    private volatile String phase = "starting";

    public ReplicationTask(long id, String type, String action, Provider<String> description, String parentNode, long parentId) {
    public ReplicationTask(long id, String type, String action, String description, String parentNode, long parentId) {
        super(id, type, action, description, parentNode, parentId);
    }

@@ -121,6 +121,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends BroadcastRequest<Request>> {
    protected void shardExecute(Task task, Request request, ShardId shardId, ActionListener<ShardResponse> shardActionListener) {
        ShardRequest shardRequest = newShardRequest(request, shardId);
        shardRequest.setParentTask(clusterService.localNode().getId(), task.getId());
        taskManager.registerChildTask(task, clusterService.localNode().getId());
        replicatedBroadcastShardAction.execute(shardRequest, shardActionListener);
    }

@@ -486,6 +486,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequest<Request>> {
                return;
            }
            final DiscoveryNode node = state.nodes().get(primary.currentNodeId());
            taskManager.registerChildTask(task, node.getId());
            if (primary.currentNodeId().equals(state.nodes().localNodeId())) {
                setPhase(task, "waiting_on_primary");
                if (logger.isTraceEnabled()) {
@@ -35,7 +35,6 @@ import java.io.IOException;
 */
public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> {


    public static final String[] ALL_ACTIONS = Strings.EMPTY_ARRAY;

    public static final String[] ALL_NODES = Strings.EMPTY_ARRAY;

@@ -52,6 +51,8 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> {

    private long parentTaskId = ALL_TASKS;

    private long taskId = ALL_TASKS;

    public BaseTasksRequest() {
    }

@@ -94,6 +95,22 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> {
        return (Request) this;
    }

    /**
     * Returns the id of the task that should be processed.
     *
     * By default tasks with any ids are returned.
     */
    public long taskId() {
        return taskId;
    }

    @SuppressWarnings("unchecked")
    public final Request taskId(long taskId) {
        this.taskId = taskId;
        return (Request) this;
    }

    /**
     * Returns the parent node id that tasks should be filtered by
     */

@@ -141,6 +158,7 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> {
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        nodesIds = in.readStringArray();
        taskId = in.readLong();
        actions = in.readStringArray();
        parentNode = in.readOptionalString();
        parentTaskId = in.readLong();

@@ -153,6 +171,7 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> {
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeStringArrayNullable(nodesIds);
        out.writeLong(taskId);
        out.writeStringArrayNullable(actions);
        out.writeOptionalString(parentNode);
        out.writeLong(parentTaskId);

@@ -163,12 +182,17 @@ public class BaseTasksRequest<Request extends BaseTasksRequest<Request>> extends ActionRequest<Request> {
        if (actions() != null && actions().length > 0 && Regex.simpleMatch(actions(), task.getAction()) == false) {
            return false;
        }
        if (taskId() != ALL_TASKS) {
            if (taskId() != task.getId()) {
                return false;
            }
        }
        if (parentNode() != null) {
            if (parentNode().equals(task.getParentNode()) == false) {
                return false;
            }
        }
        if (parentTaskId() != BaseTasksRequest.ALL_TASKS) {
        if (parentTaskId() != ALL_TASKS) {
            if (parentTaskId() != task.getParentId()) {
                return false;
            }
@@ -19,6 +19,7 @@

package org.elasticsearch.action.support.tasks;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.NoSuchNodeException;

@@ -53,12 +54,14 @@ import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Consumer;
import java.util.function.Supplier;

/**
 * The base class for transport actions that are interacting with currently running tasks.
 */
public abstract class TransportTasksAction<
    OperationTask extends Task,
    TasksRequest extends BaseTasksRequest<TasksRequest>,
    TasksResponse extends BaseTasksResponse,
    TaskResponse extends Writeable<TaskResponse>

@@ -103,16 +106,16 @@ public abstract class TransportTasksAction<
        TasksRequest request = nodeTaskRequest.tasksRequest;
        List<TaskResponse> results = new ArrayList<>();
        List<TaskOperationFailure> exceptions = new ArrayList<>();
        for (Task task : taskManager.getTasks().values()) {
            // First check action and node filters
            if (request.match(task)) {
                try {
                    results.add(taskOperation(request, task));
                } catch (Exception ex) {
                    exceptions.add(new TaskOperationFailure(clusterService.localNode().id(), task.getId(), ex));
        processTasks(request, task -> {
            try {
                TaskResponse response = taskOperation(request, task);
                if (response != null) {
                    results.add(response);
                }
            } catch (Exception ex) {
                exceptions.add(new TaskOperationFailure(clusterService.localNode().id(), task.getId(), ex));
            }
        });
        return new NodeTasksResponse(clusterService.localNode().id(), results, exceptions);
    }

@@ -124,6 +127,28 @@ public abstract class TransportTasksAction<
        return clusterState.nodes().resolveNodesIds(request.nodesIds());
    }

    protected void processTasks(TasksRequest request, Consumer<OperationTask> operation) {
        if (request.taskId() != BaseTasksRequest.ALL_TASKS) {
            // we are only checking one task, we can optimize it
            Task task = taskManager.getTask(request.taskId());
            if (task != null) {
                if (request.match(task)) {
                    operation.accept((OperationTask) task);
                } else {
                    throw new ResourceNotFoundException("task [{}] doesn't support this operation", request.taskId());
                }
            } else {
                throw new ResourceNotFoundException("task [{}] is missing", request.taskId());
            }
        } else {
            for (Task task : taskManager.getTasks().values()) {
                if (request.match(task)) {
                    operation.accept((OperationTask) task);
                }
            }
        }
    }

    protected abstract TasksResponse newResponse(TasksRequest request, List<TaskResponse> tasks, List<TaskOperationFailure> taskOperationFailures, List<FailedNodeException> failedNodeExceptions);

    @SuppressWarnings("unchecked")

@@ -150,7 +175,7 @@ public abstract class TransportTasksAction<

    protected abstract TaskResponse readTaskResponse(StreamInput in) throws IOException;

    protected abstract TaskResponse taskOperation(TasksRequest request, Task task);
    protected abstract TaskResponse taskOperation(TasksRequest request, OperationTask task);

    protected boolean transportCompress() {
        return false;

@@ -213,6 +238,7 @@ public abstract class TransportTasksAction<
                } else {
                    NodeTaskRequest nodeRequest = new NodeTaskRequest(request);
                    nodeRequest.setParentTask(clusterService.localNode().id(), task.getId());
                    taskManager.registerChildTask(task, node.getId());
                    transportService.sendRequest(node, transportNodeAction, nodeRequest, builder.build(), new BaseTransportResponseHandler<NodeTasksResponse>() {
                        @Override
                        public NodeTasksResponse newInstance() {
@@ -33,6 +33,9 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;

@@ -287,6 +290,29 @@ public interface ClusterAdminClient extends ElasticsearchClient {
     */
    ListTasksRequestBuilder prepareListTasks(String... nodesIds);

    /**
     * Cancel tasks
     *
     * @param request The nodes tasks request
     * @return The result future
     * @see org.elasticsearch.client.Requests#cancelTasksRequest(String...)
     */
    ActionFuture<CancelTasksResponse> cancelTasks(CancelTasksRequest request);

    /**
     * Cancel active tasks
     *
     * @param request The nodes tasks request
     * @param listener A listener to be notified with a result
     * @see org.elasticsearch.client.Requests#cancelTasksRequest(String...)
     */
    void cancelTasks(CancelTasksRequest request, ActionListener<CancelTasksResponse> listener);

    /**
     * Cancel active tasks
     */
    CancelTasksRequestBuilder prepareCancelTasks(String... nodesIds);

    /**
     * Returns list of shards the given search would be executed on.
     */
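A hypothetical caller of the cancel API declared above (the node ids and reason are invented, and the Client instance is assumed to already exist):

    import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
    import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
    import org.elasticsearch.client.Client;

    public class CancelTasksExample {
        // Sketch only: cancel every cancellable task on two (invented) node ids.
        static CancelTasksResponse cancelAll(Client client) {
            CancelTasksRequest request = new CancelTasksRequest("node1", "node2")
                .reason("cluster maintenance");
            return client.admin().cluster().cancelTasks(request).actionGet();
        }
    }

Passing no node ids targets all nodes, and the reason string is recorded against each cancelled task.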
@@ -22,6 +22,7 @@ package org.elasticsearch.client;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;

@@ -420,12 +421,23 @@ public class Requests {
     *
     * @param nodesIds The nodes ids to get the tasks for
     * @return The nodes tasks request
     * @see org.elasticsearch.client.ClusterAdminClient#nodesStats(org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest)
     * @see org.elasticsearch.client.ClusterAdminClient#listTasks(ListTasksRequest)
     */
    public static ListTasksRequest listTasksRequest(String... nodesIds) {
        return new ListTasksRequest(nodesIds);
    }

    /**
     * Creates a nodes tasks request against one or more nodes. Pass <tt>null</tt> or an empty array for all nodes.
     *
     * @param nodesIds The nodes ids to cancel the tasks on
     * @return The nodes tasks request
     * @see org.elasticsearch.client.ClusterAdminClient#cancelTasks(CancelTasksRequest)
     */
    public static CancelTasksRequest cancelTasksRequest(String... nodesIds) {
        return new CancelTasksRequest(nodesIds);
    }

    /**
     * Registers snapshot repository
     *
@@ -41,6 +41,10 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequestBuilder;

@@ -992,6 +996,22 @@ public abstract class AbstractClient extends AbstractComponent implements Client {
        return new ListTasksRequestBuilder(this, ListTasksAction.INSTANCE).setNodesIds(nodesIds);
    }

    @Override
    public ActionFuture<CancelTasksResponse> cancelTasks(CancelTasksRequest request) {
        return execute(CancelTasksAction.INSTANCE, request);
    }

    @Override
    public void cancelTasks(CancelTasksRequest request, ActionListener<CancelTasksResponse> listener) {
        execute(CancelTasksAction.INSTANCE, request, listener);
    }

    @Override
    public CancelTasksRequestBuilder prepareCancelTasks(String... nodesIds) {
        return new CancelTasksRequestBuilder(this, CancelTasksAction.INSTANCE).setNodesIds(nodesIds);
    }

    @Override
    public ActionFuture<ClusterSearchShardsResponse> searchShards(final ClusterSearchShardsRequest request) {
        return execute(ClusterSearchShardsAction.INSTANCE, request);
@@ -290,7 +290,7 @@ public class MetaDataMappingService extends AbstractComponent {
            if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
                throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
            }
            final Map<String, MappingMetaData> mappings = new HashMap<>();
            MetaData.Builder builder = MetaData.builder(currentState.metaData());
            for (String index : request.indices()) {
                // do the actual merge here on the master, and update the mapping source
                IndexService indexService = indicesService.indexService(index);

@@ -311,7 +311,6 @@ public class MetaDataMappingService extends AbstractComponent {
                        // same source, no changes, ignore it
                    } else {
                        // use the merged mapping source
                        mappings.put(index, new MappingMetaData(mergedMapper));
                        if (logger.isDebugEnabled()) {
                            logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
                        } else if (logger.isInfoEnabled()) {

@@ -320,28 +319,24 @@ public class MetaDataMappingService extends AbstractComponent {
                    }
                } else {
                    mappings.put(index, new MappingMetaData(mergedMapper));
                    if (logger.isDebugEnabled()) {
                        logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
                    } else if (logger.isInfoEnabled()) {
                        logger.info("[{}] create_mapping [{}]", index, mappingType);
                    }
                }
            }
            if (mappings.isEmpty()) {
                // no changes, return
                return currentState;
            }
            MetaData.Builder builder = MetaData.builder(currentState.metaData());
            for (String indexName : request.indices()) {
                IndexMetaData indexMetaData = currentState.metaData().index(indexName);

                IndexMetaData indexMetaData = currentState.metaData().index(index);
                if (indexMetaData == null) {
                    throw new IndexNotFoundException(indexName);
                    throw new IndexNotFoundException(index);
                }
                MappingMetaData mappingMd = mappings.get(indexName);
                if (mappingMd != null) {
                    builder.put(IndexMetaData.builder(indexMetaData).putMapping(mappingMd));
                IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
                // Mapping updates on a single type may have side-effects on other types so we need to
                // update mapping metadata on all types
                for (DocumentMapper mapper : indexService.mapperService().docMappers(true)) {
                    indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource()));
                }
                builder.put(indexMetaDataBuilder);
            }

            return ClusterState.builder(currentState).metaData(builder).build();
@@ -189,6 +189,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> {
    @Override
    protected void doStart() {
        add(localNodeMasterListeners);
        add(taskManager);
        this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
        this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext());
        this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
@@ -19,20 +19,23 @@

package org.elasticsearch.common.hash;

-import org.elasticsearch.ElasticsearchException;

import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Objects;

-public class MessageDigests {
/**
 * This MessageDigests class provides convenience methods for obtaining
 * thread local {@link MessageDigest} instances for MD5, SHA-1, and
 * SHA-256 message digests.
 */
+public final class MessageDigests {

    private static ThreadLocal<MessageDigest> createThreadLocalMessageDigest(String digest) {
        return ThreadLocal.withInitial(() -> {
            try {
                return MessageDigest.getInstance(digest);
            } catch (NoSuchAlgorithmException e) {
-                throw new ElasticsearchException("unexpected exception creating MessageDigest instance for [" + digest + "]", e);
+                throw new IllegalStateException("unexpected exception creating MessageDigest instance for [" + digest + "]", e);
            }
        });
    }

@@ -41,14 +44,38 @@ public class MessageDigests {
    private static final ThreadLocal<MessageDigest> SHA_1_DIGEST = createThreadLocalMessageDigest("SHA-1");
    private static final ThreadLocal<MessageDigest> SHA_256_DIGEST = createThreadLocalMessageDigest("SHA-256");

    /**
     * Returns a {@link MessageDigest} instance for MD5 digests; note
     * that the instance returned is thread local and must not be
     * shared amongst threads.
     *
     * @return a thread local {@link MessageDigest} instance that
     *         provides MD5 message digest functionality.
     */
    public static MessageDigest md5() {
        return get(MD5_DIGEST);
    }

    /**
     * Returns a {@link MessageDigest} instance for SHA-1 digests; note
     * that the instance returned is thread local and must not be
     * shared amongst threads.
     *
     * @return a thread local {@link MessageDigest} instance that
     *         provides SHA-1 message digest functionality.
     */
    public static MessageDigest sha1() {
        return get(SHA_1_DIGEST);
    }

    /**
     * Returns a {@link MessageDigest} instance for SHA-256 digests;
     * note that the instance returned is thread local and must not be
     * shared amongst threads.
     *
     * @return a thread local {@link MessageDigest} instance that
     *         provides SHA-256 message digest functionality.
     */
    public static MessageDigest sha256() {
        return get(SHA_256_DIGEST);
    }

@@ -61,6 +88,12 @@ public class MessageDigests {

    private static final char[] HEX_DIGITS = "0123456789abcdef".toCharArray();

    /**
     * Format a byte array as a hex string.
     *
     * @param bytes the input to be represented as hex.
     * @return a hex representation of the input as a String.
     */
    public static String toHexString(byte[] bytes) {
        Objects.requireNonNull(bytes);
        StringBuilder sb = new StringBuilder(2 * bytes.length);
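For reference, a minimal usage sketch of the helper above (class and methods are taken from the diff; the hashed input is illustrative, and whether get(...) resets the per-thread instance is assumed, not shown here):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;

    import org.elasticsearch.common.hash.MessageDigests;

    public class DigestUsageSketch {
        public static void main(String[] args) {
            // Each call hands back the calling thread's own MessageDigest, so no
            // external synchronization is needed; the instance must simply not be
            // shared with another thread.
            MessageDigest sha256 = MessageDigests.sha256();
            byte[] hash = sha256.digest("some plugin bytes".getBytes(StandardCharsets.UTF_8));
            // toHexString(...) renders the 32-byte SHA-256 hash as 64 lowercase hex chars.
            System.out.println(MessageDigests.toHexString(hash));
        }
    }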
@@ -116,6 +116,11 @@ public class FieldValueFactorFunction extends ScoreFunction {
                Objects.equals(this.modifier, fieldValueFactorFunction.modifier);
    }

    @Override
    protected int doHashCode() {
        return Objects.hash(boostFactor, field, modifier);
    }

    /**
     * The Type class encapsulates the modification types that can be applied
     * to the score/value product.
@@ -25,6 +25,8 @@ import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;

import java.util.Objects;

/**
 * Pseudo randomly generate a score for each {@link LeafScoreFunction#score}.
 */

@@ -92,4 +94,9 @@ public class RandomScoreFunction extends ScoreFunction {
        return this.originalSeed == randomScoreFunction.originalSeed &&
                this.saltedSeed == randomScoreFunction.saltedSeed;
    }

    @Override
    protected int doHashCode() {
        return Objects.hash(originalSeed, saltedSeed);
    }
}
@@ -66,4 +66,15 @@ public abstract class ScoreFunction {
     * Indicates whether some other {@link ScoreFunction} object of the same type is "equal to" this one.
     */
    protected abstract boolean doEquals(ScoreFunction other);

    @Override
    public final int hashCode() {
        /*
         * Override hashCode here and forward to an abstract method to force extensions of this class to override hashCode in the same
         * way that we force them to override equals. This also prevents false positives in CheckStyle's EqualsHashCode check.
         */
        return Objects.hash(scoreCombiner, doHashCode());
    }

    protected abstract int doHashCode();
}
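The idiom above (a final hashCode/equals in the base class delegating to abstract doHashCode/doEquals) is what every concrete score function in this commit implements. A self-contained illustration of the same idiom outside Elasticsearch, with made-up class names:

    import java.util.Objects;

    // Base class makes hashCode/equals final and forces subclasses to implement
    // doHashCode/doEquals, so the two methods can never drift apart.
    abstract class Combinable {
        private final String combiner;

        Combinable(String combiner) {
            this.combiner = combiner;
        }

        @Override
        public final boolean equals(Object obj) {
            if (this == obj) {
                return true;
            }
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }
            Combinable other = (Combinable) obj;
            return Objects.equals(combiner, other.combiner) && doEquals(other);
        }

        @Override
        public final int hashCode() {
            // same trick as in ScoreFunction: mix shared state with the subclass hash
            return Objects.hash(combiner, doHashCode());
        }

        protected abstract boolean doEquals(Combinable other);

        protected abstract int doHashCode();
    }

    class Weighted extends Combinable {
        private final float weight;

        Weighted(String combiner, float weight) {
            super(combiner);
            this.weight = weight;
        }

        @Override
        protected boolean doEquals(Combinable other) {
            return weight == ((Weighted) other).weight; // type already checked by equals()
        }

        @Override
        protected int doHashCode() {
            return Float.hashCode(weight);
        }
    }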
@@ -133,4 +133,9 @@ public class ScriptScoreFunction extends ScoreFunction {
        ScriptScoreFunction scriptScoreFunction = (ScriptScoreFunction) other;
        return Objects.equals(this.sScript, scriptScoreFunction.sScript);
    }

    @Override
    protected int doHashCode() {
        return Objects.hash(sScript);
    }
}
@@ -93,6 +93,11 @@ public class WeightFactorFunction extends ScoreFunction {
                Objects.equals(this.scoreFunction, weightFactorFunction.scoreFunction);
    }

    @Override
    protected int doHashCode() {
        return Objects.hash(weight, scoreFunction);
    }

    private static class ScoreOne extends ScoreFunction {

        protected ScoreOne(CombineFunction scoreCombiner) {

@@ -123,5 +128,10 @@ public class WeightFactorFunction extends ScoreFunction {
        protected boolean doEquals(ScoreFunction other) {
            return true;
        }

        @Override
        protected int doHashCode() {
            return 0;
        }
    }
}
@@ -40,6 +40,7 @@ import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthActio
import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestCancelTasksAction;
import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestListTasksAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;

@@ -265,6 +266,7 @@ public class NetworkModule extends AbstractModule {

                // Tasks API
                RestListTasksAction.class,
                RestCancelTasksAction.class,

                // Ingest API
                RestPutPipelineAction.class,
@@ -38,16 +38,16 @@ public abstract class AbstractXContentParser implements XContentParser {

    private ParseFieldMatcher matcher = ParseFieldMatcher.STRICT;

-    //Currently this is not a setting that can be changed and is a policy
+    // Currently this is not a setting that can be changed and is a policy
    // that relates to how parsing of things like "boost" are done across
    // the whole of Elasticsearch (eg if String "1.0" is a valid float).
    // The idea behind keeping it as a constant is that we can track
    // references to this policy decision throughout the codebase and find
    // and change any code that needs to apply an alternative policy.
-    public static final boolean DEFAULT_NUMBER_COEERCE_POLICY = true;
+    public static final boolean DEFAULT_NUMBER_COERCE_POLICY = true;

-    private static void checkCoerceString(boolean coeerce, Class<? extends Number> clazz) {
-        if (!coeerce) {
+    private static void checkCoerceString(boolean coerce, Class<? extends Number> clazz) {
+        if (!coerce) {
            //Need to throw type IllegalArgumentException as current catch logic in
            //NumberFieldMapper.parseCreateField relies on this for "malformed" value detection
            throw new IllegalArgumentException(clazz.getSimpleName() + " value passed as String");

@@ -102,7 +102,7 @@ public abstract class AbstractXContentParser implements XContentParser {

    @Override
    public short shortValue() throws IOException {
-        return shortValue(DEFAULT_NUMBER_COEERCE_POLICY);
+        return shortValue(DEFAULT_NUMBER_COERCE_POLICY);
    }

    @Override

@@ -121,7 +121,7 @@ public abstract class AbstractXContentParser implements XContentParser {

    @Override
    public int intValue() throws IOException {
-        return intValue(DEFAULT_NUMBER_COEERCE_POLICY);
+        return intValue(DEFAULT_NUMBER_COERCE_POLICY);
    }

@@ -141,7 +141,7 @@ public abstract class AbstractXContentParser implements XContentParser {

    @Override
    public long longValue() throws IOException {
-        return longValue(DEFAULT_NUMBER_COEERCE_POLICY);
+        return longValue(DEFAULT_NUMBER_COERCE_POLICY);
    }

    @Override

@@ -160,7 +160,7 @@ public abstract class AbstractXContentParser implements XContentParser {

    @Override
    public float floatValue() throws IOException {
-        return floatValue(DEFAULT_NUMBER_COEERCE_POLICY);
+        return floatValue(DEFAULT_NUMBER_COERCE_POLICY);
    }

    @Override

@@ -178,7 +178,7 @@ public abstract class AbstractXContentParser implements XContentParser {

    @Override
    public double doubleValue() throws IOException {
-        return doubleValue(DEFAULT_NUMBER_COEERCE_POLICY);
+        return doubleValue(DEFAULT_NUMBER_COERCE_POLICY);
    }

    @Override
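To make the coercion policy concrete, here is a standalone sketch of what DEFAULT_NUMBER_COERCE_POLICY controls (simplified; the real parser works on tokens rather than raw objects, and the helper below is illustrative only):

    public class CoercionSketch {
        // Mirrors checkCoerceString above: reject strings when coercion is off.
        static double toDouble(Object node, boolean coerce) {
            if (node instanceof String) {
                if (!coerce) {
                    throw new IllegalArgumentException("Double value passed as String");
                }
                return Double.parseDouble((String) node);
            }
            return ((Number) node).doubleValue();
        }

        public static void main(String[] args) {
            System.out.println(toDouble("1.0", true));  // 1.0 — the default policy accepts numeric strings
            System.out.println(toDouble(2.5, false));   // 2.5 — real numbers always pass
            toDouble("1.0", false);                     // throws: "Double value passed as String"
        }
    }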
@@ -19,7 +19,6 @@

package org.elasticsearch.env;

import org.apache.lucene.util.Constants;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
@@ -145,8 +145,8 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc

    public static class TypeParser implements Mapper.TypeParser {
        @Override
-        public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
-            StringFieldMapper.Builder builder = stringField(name);
+        public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
+            StringFieldMapper.Builder builder = stringField(fieldName);
            // hack for the fact that string can't just accept true/false for
            // the index property and still accepts no/not_analyzed/analyzed
            final Object index = node.remove("index");

@@ -165,10 +165,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
                    node.put("index", false);
                    break;
                default:
-                    throw new IllegalArgumentException("Can't parse [index] value [" + index + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
+                    throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
                }
            }
-            parseTextField(builder, name, node, parserContext);
+            parseTextField(builder, fieldName, node, parserContext);
            for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                Map.Entry<String, Object> entry = iterator.next();
                String propName = Strings.toUnderscoreCase(entry.getKey());

@@ -182,7 +182,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
                } else if (propName.equals("search_quote_analyzer")) {
                    NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
                    if (analyzer == null) {
-                        throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
+                        throw new MapperParsingException("Analyzer [" + propNode.toString() + "] not found for field [" + fieldName + "]");
                    }
                    builder.searchQuotedAnalyzer(analyzer);
                    iterator.remove();

@@ -207,7 +207,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
                } else if (propName.equals("ignore_above")) {
                    builder.ignoreAbove(XContentMapValues.nodeIntegerValue(propNode, -1));
                    iterator.remove();
-                } else if (parseMultiField(builder, name, parserContext, propName, propNode)) {
+                } else if (parseMultiField(builder, fieldName, parserContext, propName, propNode)) {
                    iterator.remove();
                }
            }
@@ -336,7 +336,7 @@ public class TypeParsers {
            case "false":
                return false;
            default:
-                throw new IllegalArgumentException("Can't parse [index] value [" + index + "], expected [true] or [false]");
+                throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true] or [false]");
            }
        } else {
            final String normalizedIndex = Strings.toUnderscoreCase(index);

@@ -349,7 +349,7 @@ public class TypeParsers {
            case "no":
                return false;
            default:
-                throw new IllegalArgumentException("Can't parse [index] value [" + index + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
+                throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
            }
        }
    }

@@ -388,7 +388,7 @@ public class TypeParsers {
        }
        SimilarityProvider similarityProvider = parserContext.getSimilarity(value);
        if (similarityProvider == null) {
-            throw new MapperParsingException("Unknown Similarity type [" + value + "] for [" + name + "]");
+            throw new MapperParsingException("Unknown Similarity type [" + value + "] for field [" + name + "]");
        }
        return similarityProvider;
    }
@@ -383,6 +383,11 @@ public abstract class DecayFunctionBuilder<DFB extends DecayFunctionBuilder> ext
            return super.doEquals(other) &&
                    Objects.equals(this.origin, geoFieldDataScoreFunction.origin);
        }

        @Override
        protected int doHashCode() {
            return Objects.hash(super.doHashCode(), origin);
        }
    }

    static class NumericFieldDataScoreFunction extends AbstractDistanceScoreFunction {

@@ -533,5 +538,10 @@ public abstract class DecayFunctionBuilder<DFB extends DecayFunctionBuilder> ext
                    Objects.equals(this.func, distanceScoreFunction.func) &&
                    Objects.equals(this.getFieldName(), distanceScoreFunction.getFieldName());
        }

        @Override
        protected int doHashCode() {
            return Objects.hash(scale, offset, mode, func, getFieldName());
        }
    }
}
@@ -38,7 +38,8 @@ public final class ConfigurationUtils {
     *
     * If the property value isn't of type string a {@link ElasticsearchParseException} is thrown.
     */
-    public static String readOptionalStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
+    public static String readOptionalStringProperty(String processorType, String processorTag, Map<String, Object> configuration,
+                                                    String propertyName) {
        Object value = configuration.remove(propertyName);
        return readString(processorType, processorTag, propertyName, value);
    }

@@ -49,7 +50,8 @@ public final class ConfigurationUtils {
     * If the property value isn't of type string an {@link ElasticsearchParseException} is thrown.
     * If the property is missing an {@link ElasticsearchParseException} is thrown
     */
-    public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
+    public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration,
+                                            String propertyName) {
        return readStringProperty(processorType, processorTag, configuration, propertyName, null);
    }

@@ -59,7 +61,8 @@ public final class ConfigurationUtils {
     * If the property value isn't of type string a {@link ElasticsearchParseException} is thrown.
     * If the property is missing and no default value has been specified a {@link ElasticsearchParseException} is thrown
     */
-    public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName, String defaultValue) {
+    public static String readStringProperty(String processorType, String processorTag, Map<String, Object> configuration,
+                                            String propertyName, String defaultValue) {
        Object value = configuration.remove(propertyName);
        if (value == null && defaultValue != null) {
            return defaultValue;

@@ -76,7 +79,28 @@ public final class ConfigurationUtils {
        if (value instanceof String) {
            return (String) value;
        }
-        throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a string, but of type [" + value.getClass().getName() + "]");
+        throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a string, but of type [" +
+            value.getClass().getName() + "]");
    }

    /**
     * Returns and removes the specified property from the specified configuration map.
     *
     * If the property value isn't of type int a {@link ElasticsearchParseException} is thrown.
     * If the property is missing an {@link ElasticsearchParseException} is thrown
     */
    public static int readIntProperty(String processorType, String processorTag, Map<String, Object> configuration, String propertyName,
                                      int defaultValue) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            return defaultValue;
        }
        try {
            return Integer.parseInt(value.toString());
        } catch (Throwable t) {
            throw newConfigurationException(processorType, processorTag, propertyName,
                "property cannot be converted to an int [" + value.toString() + "]");
        }
    }

    /**

@@ -84,7 +108,8 @@ public final class ConfigurationUtils {
     *
     * If the property value isn't of type list an {@link ElasticsearchParseException} is thrown.
     */
-    public static <T> List<T> readOptionalList(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
+    public static <T> List<T> readOptionalList(String processorType, String processorTag, Map<String, Object> configuration,
+                                               String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            return null;

@@ -113,7 +138,8 @@ public final class ConfigurationUtils {
            List<T> stringList = (List<T>) value;
            return stringList;
        } else {
-            throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a list, but of type [" + value.getClass().getName() + "]");
+            throw newConfigurationException(processorType, processorTag, propertyName,
+                "property isn't a list, but of type [" + value.getClass().getName() + "]");
        }
    }

@@ -123,7 +149,8 @@ public final class ConfigurationUtils {
     * If the property value isn't of type map an {@link ElasticsearchParseException} is thrown.
     * If the property is missing an {@link ElasticsearchParseException} is thrown
     */
-    public static <T> Map<String, T> readMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
+    public static <T> Map<String, T> readMap(String processorType, String processorTag, Map<String, Object> configuration,
+                                             String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            throw newConfigurationException(processorType, processorTag, propertyName, "required property is missing");

@@ -137,7 +164,8 @@ public final class ConfigurationUtils {
     *
     * If the property value isn't of type map an {@link ElasticsearchParseException} is thrown.
     */
-    public static <T> Map<String, T> readOptionalMap(String processorType, String processorTag, Map<String, Object> configuration, String propertyName) {
+    public static <T> Map<String, T> readOptionalMap(String processorType, String processorTag, Map<String, Object> configuration,
+                                                     String propertyName) {
        Object value = configuration.remove(propertyName);
        if (value == null) {
            return null;

@@ -152,7 +180,8 @@ public final class ConfigurationUtils {
            Map<String, T> map = (Map<String, T>) value;
            return map;
        } else {
-            throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a map, but of type [" + value.getClass().getName() + "]");
+            throw newConfigurationException(processorType, processorTag, propertyName,
+                "property isn't a map, but of type [" + value.getClass().getName() + "]");
        }
    }

@@ -167,7 +196,8 @@ public final class ConfigurationUtils {
        return value;
    }

-    public static ElasticsearchParseException newConfigurationException(String processorType, String processorTag, String propertyName, String reason) {
+    public static ElasticsearchParseException newConfigurationException(String processorType, String processorTag, String propertyName,
+                                                                        String reason) {
        ElasticsearchParseException exception = new ElasticsearchParseException("[" + propertyName + "] " + reason);

        if (processorType != null) {

@@ -182,7 +212,8 @@ public final class ConfigurationUtils {
        return exception;
    }

-    public static List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs, ProcessorsRegistry processorRegistry) throws Exception {
+    public static List<Processor> readProcessorConfigs(List<Map<String, Map<String, Object>>> processorConfigs,
+                                                       ProcessorsRegistry processorRegistry) throws Exception {
        List<Processor> processors = new ArrayList<>();
        if (processorConfigs != null) {
            for (Map<String, Map<String, Object>> processorConfigWithKey : processorConfigs) {

@@ -197,12 +228,15 @@ public final class ConfigurationUtils {
    private static Processor readProcessor(ProcessorsRegistry processorRegistry, String type, Map<String, Object> config) throws Exception {
        Processor.Factory factory = processorRegistry.getProcessorFactory(type);
        if (factory != null) {
-            List<Map<String, Map<String, Object>>> onFailureProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, Pipeline.ON_FAILURE_KEY);
+            List<Map<String, Map<String, Object>>> onFailureProcessorConfigs =
+                ConfigurationUtils.readOptionalList(null, null, config, Pipeline.ON_FAILURE_KEY);

            List<Processor> onFailureProcessors = readProcessorConfigs(onFailureProcessorConfigs, processorRegistry);
            Processor processor;
            processor = factory.create(config);
            if (!config.isEmpty()) {
-                throw new ElasticsearchParseException("processor [" + type + "] doesn't support one or more provided configuration parameters " + Arrays.toString(config.keySet().toArray()));
+                throw new ElasticsearchParseException("processor [{}] doesn't support one or more provided configuration parameters {}",
+                    type, Arrays.toString(config.keySet().toArray()));
            }
            if (onFailureProcessors.isEmpty()) {
                return processor;
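A short usage sketch of these helpers as a processor factory would call them (the processor type, tag, and keys are illustrative, and the package of ConfigurationUtils is assumed to be org.elasticsearch.ingest.core):

    import java.util.HashMap;
    import java.util.Map;

    import org.elasticsearch.ingest.core.ConfigurationUtils; // assumed package

    public class ConfigReadSketch {
        public static void main(String[] args) {
            Map<String, Object> config = new HashMap<>();
            config.put("field", "message");
            config.put("max_length", "100");

            // Each read* helper removes the key it consumes from the map...
            String field = ConfigurationUtils.readStringProperty("my_processor", null, config, "field");
            // ...and readIntProperty accepts numeric strings via Integer.parseInt(value.toString()).
            int maxLength = ConfigurationUtils.readIntProperty("my_processor", null, config, "max_length", 50);

            // An empty map afterwards means every parameter was recognized;
            // readProcessor(...) turns any leftovers into an ElasticsearchParseException.
            System.out.println(field + " / " + maxLength + " / leftovers: " + config);
        }
    }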
@@ -208,17 +208,23 @@ class InstallPluginCommand extends CliTool.Command {
        return zip;
    }

-    private Path unzip(Path zip, Path pluginsDir) throws IOException {
+    private Path unzip(Path zip, Path pluginsDir) throws IOException, UserError {
        // unzip plugin to a staging temp dir
        Path target = Files.createTempDirectory(pluginsDir, ".installing-");
        Files.createDirectories(target);

        boolean hasEsDir = false;
        // TODO: we should wrap this in a try/catch and try deleting the target dir on failure?
        try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) {
            ZipEntry entry;
            byte[] buffer = new byte[8192];
            while ((entry = zipInput.getNextEntry()) != null) {
-                Path targetFile = target.resolve(entry.getName());
+                if (entry.getName().startsWith("elasticsearch/") == false) {
+                    // only extract the elasticsearch directory
+                    continue;
+                }
+                hasEsDir = true;
+                Path targetFile = target.resolve(entry.getName().substring("elasticsearch/".length()));
                // TODO: handle name being an absolute path

                // be on the safe side: do not rely on that directories are always extracted

@@ -236,6 +242,10 @@ class InstallPluginCommand extends CliTool.Command {
            }
        }
        Files.delete(zip);
        if (hasEsDir == false) {
            IOUtils.rm(target);
            throw new UserError(CliTool.ExitStatus.DATA_ERROR, "`elasticsearch` directory is missing in the plugin zip");
        }
        return target;
    }
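The same filtering idea in isolation, for readers who want the control flow without the CLI plumbing (a simplified sketch; the real command also buffers writes, guards against absolute entry names, and cleans up on failure):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.util.zip.ZipEntry;
    import java.util.zip.ZipInputStream;

    public class UnzipSketch {
        // Extract only entries under a required top-level directory, stripping the prefix.
        static boolean unzipEsDir(Path zip, Path target) throws Exception {
            boolean hasEsDir = false;
            try (ZipInputStream in = new ZipInputStream(Files.newInputStream(zip))) {
                ZipEntry entry;
                while ((entry = in.getNextEntry()) != null) {
                    if (!entry.getName().startsWith("elasticsearch/")) {
                        continue; // ignore anything outside the required directory
                    }
                    hasEsDir = true;
                    Path out = target.resolve(entry.getName().substring("elasticsearch/".length()));
                    if (entry.isDirectory()) {
                        Files.createDirectories(out);
                    } else {
                        Files.createDirectories(out.getParent());
                        Files.copy(in, out, StandardCopyOption.REPLACE_EXISTING);
                    }
                }
            }
            return hasEsDir; // caller fails the install when this is false
        }
    }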
@@ -0,0 +1,62 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.rest.action.admin.cluster.node.tasks;

import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.support.RestToXContentListener;

import static org.elasticsearch.rest.RestRequest.Method.POST;


public class RestCancelTasksAction extends BaseRestHandler {

    @Inject
    public RestCancelTasksAction(Settings settings, RestController controller, Client client) {
        super(settings, client);
        controller.registerHandler(POST, "/_tasks/_cancel", this);
        controller.registerHandler(POST, "/_tasks/{nodeId}/_cancel", this);
        controller.registerHandler(POST, "/_tasks/{nodeId}/{taskId}/_cancel", this);
    }

    @Override
    public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
        String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
        long taskId = request.paramAsLong("taskId", ListTasksRequest.ALL_TASKS);
        String[] actions = Strings.splitStringByCommaToArray(request.param("actions"));
        String parentNode = request.param("parent_node");
        long parentTaskId = request.paramAsLong("parent_task", ListTasksRequest.ALL_TASKS);

        CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(nodesIds);
        cancelTasksRequest.taskId(taskId);
        cancelTasksRequest.actions(actions);
        cancelTasksRequest.parentNode(parentNode);
        cancelTasksRequest.parentTaskId(parentTaskId);
        client.admin().cluster().cancelTasks(cancelTasksRequest, new RestToXContentListener<>(channel));
    }
}
@@ -40,18 +40,20 @@ public class RestListTasksAction extends BaseRestHandler {
        super(settings, client);
        controller.registerHandler(GET, "/_tasks", this);
        controller.registerHandler(GET, "/_tasks/{nodeId}", this);
-        controller.registerHandler(GET, "/_tasks/{nodeId}/{actions}", this);
+        controller.registerHandler(GET, "/_tasks/{nodeId}/{taskId}", this);
    }

    @Override
    public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) {
        boolean detailed = request.paramAsBoolean("detailed", false);
        String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
        long taskId = request.paramAsLong("taskId", ListTasksRequest.ALL_TASKS);
        String[] actions = Strings.splitStringByCommaToArray(request.param("actions"));
        String parentNode = request.param("parent_node");
        long parentTaskId = request.paramAsLong("parent_task", ListTasksRequest.ALL_TASKS);

        ListTasksRequest listTasksRequest = new ListTasksRequest(nodesIds);
        listTasksRequest.taskId(taskId);
        listTasksRequest.detailed(detailed);
        listTasksRequest.actions(actions);
        listTasksRequest.parentNode(parentNode);
@@ -133,7 +133,7 @@ public class RestIndicesAliasesAction extends BaseRestHandler {
            }

            if (type == AliasAction.Type.ADD) {
-                AliasActions aliasActions = new AliasActions(type, indices, aliases);
+                AliasActions aliasActions = new AliasActions(type, indices, aliases).filter(filter);
                if (routingSet) {
                    aliasActions.routing(routing);
                }
@@ -22,41 +22,115 @@ package org.elasticsearch.search.sort;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.MultiValueMode;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Objects;

/**
 * A geo distance based sorting on a geo point like field.
 */
-public class GeoDistanceSortBuilder extends SortBuilder {
+public class GeoDistanceSortBuilder extends SortBuilder
+        implements ToXContent, NamedWriteable<GeoDistanceSortBuilder>, SortElementParserTemp<GeoDistanceSortBuilder> {
    public static final String NAME = "_geo_distance";
    public static final boolean DEFAULT_COERCE = false;
    public static final boolean DEFAULT_IGNORE_MALFORMED = false;

-    final String fieldName;
    static final GeoDistanceSortBuilder PROTOTYPE = new GeoDistanceSortBuilder("", -1, -1);

+    private final String fieldName;
    private final List<GeoPoint> points = new ArrayList<>();
-    private final List<String> geohashes = new ArrayList<>();

-    private GeoDistance geoDistance;
-    private DistanceUnit unit;
-    private SortOrder order;
-    private String sortMode;
+    private GeoDistance geoDistance = GeoDistance.DEFAULT;
+    private DistanceUnit unit = DistanceUnit.DEFAULT;
+    private SortOrder order = SortOrder.ASC;

+    // TODO there is an enum that covers that parameter which we should be using here
+    private String sortMode = null;
    @SuppressWarnings("rawtypes")
    private QueryBuilder nestedFilter;
    private String nestedPath;
-    private Boolean coerce;
-    private Boolean ignoreMalformed;

+    // TODO switch to GeoValidationMethod enum
+    private boolean coerce = DEFAULT_COERCE;
+    private boolean ignoreMalformed = DEFAULT_IGNORE_MALFORMED;

    /**
     * Constructs a new distance based sort on a geo point like field.
     *
     * @param fieldName The geo point like field name.
     * @param points The points to create the range distance facets from.
     */
-    public GeoDistanceSortBuilder(String fieldName) {
+    public GeoDistanceSortBuilder(String fieldName, GeoPoint... points) {
        this.fieldName = fieldName;
+        if (points.length == 0) {
+            throw new IllegalArgumentException("Geo distance sorting needs at least one point.");
+        }
+        this.points.addAll(Arrays.asList(points));
    }

    /**
     * Constructs a new distance based sort on a geo point like field.
     *
     * @param fieldName The geo point like field name.
     * @param lat Latitude of the point to create the range distance facets from.
     * @param lon Longitude of the point to create the range distance facets from.
     */
    public GeoDistanceSortBuilder(String fieldName, double lat, double lon) {
        this(fieldName, new GeoPoint(lat, lon));
    }

    /**
     * Constructs a new distance based sort on a geo point like field.
     *
     * @param fieldName The geo point like field name.
     * @param geohashes The points to create the range distance facets from.
     */
    public GeoDistanceSortBuilder(String fieldName, String ... geohashes) {
        if (geohashes.length == 0) {
            throw new IllegalArgumentException("Geo distance sorting needs at least one point.");
        }
        for (String geohash : geohashes) {
            this.points.add(GeoPoint.fromGeohash(geohash));
        }
        this.fieldName = fieldName;
    }

    /**
     * Copy constructor.
     * */
    GeoDistanceSortBuilder(GeoDistanceSortBuilder original) {
        this.fieldName = original.fieldName();
        this.points.addAll(original.points);
        this.geoDistance = original.geoDistance;
        this.unit = original.unit;
        this.order = original.order;
        this.sortMode = original.sortMode;
        this.nestedFilter = original.nestedFilter;
        this.nestedPath = original.nestedPath;
        this.coerce = original.coerce;
        this.ignoreMalformed = original.ignoreMalformed;
    }

    /**
     * Returns the geo point like field the distance based sort operates on.
     * */
    public String fieldName() {
        return this.fieldName;
    }

    /**

@@ -79,15 +153,27 @@ public class GeoDistanceSortBuilder extends SortBuilder {
        this.points.addAll(Arrays.asList(points));
        return this;
    }

    /**
     * Returns the points to create the range distance facets from.
     */
    public GeoPoint[] points() {
        return this.points.toArray(new GeoPoint[this.points.size()]);
    }

    /**
     * The geohash of the geo point to create the range distance facets from.
     *
     * Deprecated - please use points(GeoPoint... points) instead.
     */
    @Deprecated
    public GeoDistanceSortBuilder geohashes(String... geohashes) {
-        this.geohashes.addAll(Arrays.asList(geohashes));
+        for (String geohash : geohashes) {
+            this.points.add(GeoPoint.fromGeohash(geohash));
+        }
        return this;
    }

    /**
     * The geo distance type used to compute the distance.
     */

@@ -95,6 +181,13 @@ public class GeoDistanceSortBuilder extends SortBuilder {
        this.geoDistance = geoDistance;
        return this;
    }

    /**
     * Returns the geo distance type used to compute the distance.
     */
    public GeoDistance geoDistance() {
        return this.geoDistance;
    }

    /**
     * The distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#KILOMETERS}

@@ -104,6 +197,13 @@ public class GeoDistanceSortBuilder extends SortBuilder {
        return this;
    }

    /**
     * Returns the distance unit to use. Defaults to {@link org.elasticsearch.common.unit.DistanceUnit#KILOMETERS}
     */
    public DistanceUnit unit() {
        return this.unit;
    }

    /**
     * The order of sorting. Defaults to {@link SortOrder#ASC}.
     */

@@ -113,11 +213,18 @@ public class GeoDistanceSortBuilder extends SortBuilder {
        return this;
    }

    /** Returns the order of sorting. */
    public SortOrder order() {
        return this.order;
    }

    /**
     * Not relevant.
     *
     * TODO should this throw an exception rather than silently ignore a parameter that is not used?
     */
    @Override
-    public SortBuilder missing(Object missing) {
+    public GeoDistanceSortBuilder missing(Object missing) {
        return this;
    }

@@ -126,10 +233,19 @@ public class GeoDistanceSortBuilder extends SortBuilder {
     * Possible values: min and max
     */
    public GeoDistanceSortBuilder sortMode(String sortMode) {
+        MultiValueMode temp = MultiValueMode.fromString(sortMode);
+        if (temp == MultiValueMode.SUM) {
+            throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance");
+        }
        this.sortMode = sortMode;
        return this;
    }

    /** Returns which distance to use for sorting in the case a document contains multiple geo points. */
    public String sortMode() {
        return this.sortMode;
    }

    /**
     * Sets the nested filter that the nested objects should match with in order to be taken into account
     * for sorting.

@@ -139,6 +255,14 @@ public class GeoDistanceSortBuilder extends SortBuilder {
        return this;
    }

    /**
     * Returns the nested filter that the nested objects should match with in order to be taken into account
     * for sorting.
     **/
    public QueryBuilder getNestedFilter() {
        return this.nestedFilter;
    }

    /**
     * Sets the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a
     * field inside a nested object, the nearest upper nested object is selected as nested path.

@@ -147,42 +271,53 @@ public class GeoDistanceSortBuilder extends SortBuilder {
        this.nestedPath = nestedPath;
        return this;
    }

    /**
     * Returns the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a
     * field inside a nested object, the nearest upper nested object is selected as nested path.
     */
    public String getNestedPath() {
        return this.nestedPath;
    }

    public GeoDistanceSortBuilder coerce(boolean coerce) {
        this.coerce = coerce;
        return this;
    }

    public boolean coerce() {
        return this.coerce;
    }

    public GeoDistanceSortBuilder ignoreMalformed(boolean ignoreMalformed) {
-        this.ignoreMalformed = ignoreMalformed;
+        if (coerce == false) {
+            this.ignoreMalformed = ignoreMalformed;
+        }
        return this;
    }

    public boolean ignoreMalformed() {
        return this.ignoreMalformed;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject("_geo_distance");
-        if (geohashes.size() == 0 && points.size() == 0) {
-            throw new ElasticsearchParseException("No points provided for _geo_distance sort.");
-        }
+        builder.startObject(NAME);

        builder.startArray(fieldName);
        for (GeoPoint point : points) {
            builder.value(point);
        }
-        for (String geohash : geohashes) {
-            builder.value(geohash);
-        }
        builder.endArray();

-        if (unit != null) {
-            builder.field("unit", unit);
-        }
-        if (geoDistance != null) {
-            builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT));
-        }
+        builder.field("unit", unit);
+        builder.field("distance_type", geoDistance.name().toLowerCase(Locale.ROOT));
        if (order == SortOrder.DESC) {
            builder.field("reverse", true);
        } else {
            builder.field("reverse", false);
        }

        if (sortMode != null) {
            builder.field("mode", sortMode);
        }

@@ -193,14 +328,200 @@ public class GeoDistanceSortBuilder extends SortBuilder {
        if (nestedFilter != null) {
            builder.field("nested_filter", nestedFilter, params);
        }
-        if (coerce != null) {
-            builder.field("coerce", coerce);
-        }
-        if (ignoreMalformed != null) {
-            builder.field("ignore_malformed", ignoreMalformed);
-        }
+        builder.field("coerce", coerce);
+        builder.field("ignore_malformed", ignoreMalformed);

        builder.endObject();
        return builder;
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public boolean equals(Object object) {
        if (this == object) {
            return true;
        }

        if (object == null || getClass() != object.getClass()) {
            return false;
        }

        GeoDistanceSortBuilder other = (GeoDistanceSortBuilder) object;
        return Objects.equals(fieldName, other.fieldName) &&
                Objects.deepEquals(points, other.points) &&
                Objects.equals(geoDistance, other.geoDistance) &&
                Objects.equals(unit, other.unit) &&
                Objects.equals(sortMode, other.sortMode) &&
                Objects.equals(order, other.order) &&
                Objects.equals(nestedFilter, other.nestedFilter) &&
                Objects.equals(nestedPath, other.nestedPath) &&
                Objects.equals(coerce, other.coerce) &&
                Objects.equals(ignoreMalformed, other.ignoreMalformed);
    }

    @Override
    public int hashCode() {
        return Objects.hash(this.fieldName, this.points, this.geoDistance,
                this.unit, this.sortMode, this.order, this.nestedFilter, this.nestedPath, this.coerce, this.ignoreMalformed);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(fieldName);
        out.writeGenericValue(points);

        geoDistance.writeTo(out);
        unit.writeTo(out);
        order.writeTo(out);
        out.writeOptionalString(sortMode);
        if (nestedFilter != null) {
            out.writeBoolean(true);
            out.writeQuery(nestedFilter);
        } else {
            out.writeBoolean(false);
        }
        out.writeOptionalString(nestedPath);
        out.writeBoolean(coerce);
        out.writeBoolean(ignoreMalformed);
    }

    @Override
    public GeoDistanceSortBuilder readFrom(StreamInput in) throws IOException {
        String fieldName = in.readString();

        ArrayList<GeoPoint> points = (ArrayList<GeoPoint>) in.readGenericValue();
        GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, points.toArray(new GeoPoint[points.size()]));

        result.geoDistance(GeoDistance.readGeoDistanceFrom(in));
        result.unit(DistanceUnit.readDistanceUnit(in));
        result.order(SortOrder.readOrderFrom(in));
        String sortMode = in.readOptionalString();
        if (sortMode != null) {
            result.sortMode(sortMode);
        }
        if (in.readBoolean()) {
            result.setNestedFilter(in.readQuery());
        }
        result.setNestedPath(in.readOptionalString());
        result.coerce(in.readBoolean());
        result.ignoreMalformed(in.readBoolean());
        return result;
    }

    @Override
    public GeoDistanceSortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException {
        XContentParser parser = context.parser();
        String fieldName = null;
        List<GeoPoint> geoPoints = new ArrayList<>();
        DistanceUnit unit = DistanceUnit.DEFAULT;
        GeoDistance geoDistance = GeoDistance.DEFAULT;
        boolean reverse = false;
        MultiValueMode sortMode = null;
        QueryBuilder nestedFilter = null;
        String nestedPath = null;

        boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE;
        boolean ignoreMalformed = GeoDistanceSortBuilder.DEFAULT_IGNORE_MALFORMED;

        XContentParser.Token token;
        String currentName = parser.currentName();
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentName = parser.currentName();
            } else if (token == XContentParser.Token.START_ARRAY) {
                parseGeoPoints(parser, geoPoints);

                fieldName = currentName;
            } else if (token == XContentParser.Token.START_OBJECT) {
                // the json in the format of -> field : { lat : 30, lon : 12 }
                if ("nested_filter".equals(currentName) || "nestedFilter".equals(currentName)) {
                    // TODO Note to remember: while this is kept as a QueryBuilder internally,
                    // we need to make sure to call toFilter() on it once on the shard
                    // (e.g. in the new build() method)
                    nestedFilter = context.parseInnerQueryBuilder();
                } else {
                    fieldName = currentName;
                    GeoPoint point = new GeoPoint();
                    GeoUtils.parseGeoPoint(parser, point);
                    geoPoints.add(point);
                }
            } else if (token.isValue()) {
                if ("reverse".equals(currentName)) {
                    reverse = parser.booleanValue();
                } else if ("order".equals(currentName)) {
                    reverse = "desc".equals(parser.text());
                } else if ("unit".equals(currentName)) {
                    unit = DistanceUnit.fromString(parser.text());
                } else if ("distance_type".equals(currentName) || "distanceType".equals(currentName)) {
                    geoDistance = GeoDistance.fromString(parser.text());
                } else if ("coerce".equals(currentName) || "normalize".equals(currentName)) {
                    coerce = parser.booleanValue();
                    if (coerce == true) {
                        ignoreMalformed = true;
                    }
                } else if ("ignore_malformed".equals(currentName)) {
                    boolean ignore_malformed_value = parser.booleanValue();
                    if (coerce == false) {
                        ignoreMalformed = ignore_malformed_value;
                    }
                } else if ("sort_mode".equals(currentName) || "sortMode".equals(currentName) || "mode".equals(currentName)) {
                    sortMode = MultiValueMode.fromString(parser.text());
                } else if ("nested_path".equals(currentName) || "nestedPath".equals(currentName)) {
                    nestedPath = parser.text();
                } else {
                    GeoPoint point = new GeoPoint();
                    point.resetFromString(parser.text());
                    geoPoints.add(point);
                    fieldName = currentName;
                }
            }
        }

        GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, geoPoints.toArray(new GeoPoint[geoPoints.size()]));
        result.geoDistance(geoDistance);
        result.unit(unit);
        if (reverse) {
            result.order(SortOrder.DESC);
        } else {
            result.order(SortOrder.ASC);
        }
        if (sortMode != null) {
            result.sortMode(sortMode.name());
        }
        result.setNestedFilter(nestedFilter);
        result.setNestedPath(nestedPath);
        result.coerce(coerce);
        result.ignoreMalformed(ignoreMalformed);
        return result;

    }

    static void parseGeoPoints(XContentParser parser, List<GeoPoint> geoPoints) throws IOException {
        while (!parser.nextToken().equals(XContentParser.Token.END_ARRAY)) {
            if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) {
                // we might get here if the geo point is " number, number] " and the parser already moved over the opening bracket
                // in this case we cannot use GeoUtils.parseGeoPoint(..) because this expects an opening bracket
                double lon = parser.doubleValue();
                parser.nextToken();
                if (!parser.currentToken().equals(XContentParser.Token.VALUE_NUMBER)) {
                    throw new ElasticsearchParseException(
                            "geo point parsing: expected second number but got [{}] instead",
                            parser.currentToken());
                }
                double lat = parser.doubleValue();
                GeoPoint point = new GeoPoint();
                point.reset(lat, lon);
                geoPoints.add(point);
            } else {
                GeoPoint point = new GeoPoint();
                GeoUtils.parseGeoPoint(parser, point);
                geoPoints.add(point);
            }

        }
    }
}
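For orientation, this is roughly the sort-clause shape that fromXContent and parseGeoPoints above accept (the field name and all values are illustrative; every key shown maps to a branch in fromXContent, and the [lon, lat] array order is the one the number branch of parseGeoPoints compensates for):

    public class GeoDistanceSortJsonSketch {
        // "pin.location" is a made-up field name.
        static final String SORT_CLAUSE =
            "{"
          + "  \"_geo_distance\" : {"
          + "    \"pin.location\" : [ [ -70.0, 40.0 ], \"drm3btev3e86\" ],"
          + "    \"unit\" : \"km\","
          + "    \"distance_type\" : \"arc\","
          + "    \"mode\" : \"min\","
          + "    \"order\" : \"asc\","
          + "    \"coerce\" : false,"
          + "    \"ignore_malformed\" : false"
          + "  }"
          + "}";
    }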
@@ -72,8 +72,8 @@ public class GeoDistanceSortParser implements SortParser {
        NestedInnerQueryParseSupport nestedHelper = null;

        final boolean indexCreatedBeforeV2_0 = context.indexShard().getIndexSettings().getIndexVersionCreated().before(Version.V_2_0_0);
-        boolean coerce = false;
-        boolean ignoreMalformed = false;
+        boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE;
+        boolean ignoreMalformed = GeoDistanceSortBuilder.DEFAULT_IGNORE_MALFORMED;

        XContentParser.Token token;
        String currentName = parser.currentName();

@@ -81,7 +81,7 @@ public class GeoDistanceSortParser implements SortParser {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentName = parser.currentName();
            } else if (token == XContentParser.Token.START_ARRAY) {
-                parseGeoPoints(parser, geoPoints);
+                GeoDistanceSortBuilder.parseGeoPoints(parser, geoPoints);

                fieldName = currentName;
            } else if (token == XContentParser.Token.START_OBJECT) {

@@ -213,26 +213,4 @@ public class GeoDistanceSortParser implements SortParser {
        return new SortField(fieldName, geoDistanceComparatorSource, reverse);
    }

-    private void parseGeoPoints(XContentParser parser, List<GeoPoint> geoPoints) throws IOException {
-        while (!parser.nextToken().equals(XContentParser.Token.END_ARRAY)) {
-            if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) {
-                // we might get here if the geo point is " number, number] " and the parser already moved over the opening bracket
-                // in this case we cannot use GeoUtils.parseGeoPoint(..) because this expects an opening bracket
-                double lon = parser.doubleValue();
-                parser.nextToken();
-                if (!parser.currentToken().equals(XContentParser.Token.VALUE_NUMBER)) {
-                    throw new ElasticsearchParseException("geo point parsing: expected second number but got [{}] instead", parser.currentToken());
-                }
-                double lat = parser.doubleValue();
-                GeoPoint point = new GeoPoint();
-                point.reset(lat, lon);
-                geoPoints.add(point);
-            } else {
-                GeoPoint point = new GeoPoint();
-                GeoUtils.parseGeoPoint(parser, point);
-                geoPoints.add(point);
-            }
-
-        }
-    }
}
@@ -19,8 +19,11 @@

package org.elasticsearch.search.sort;

import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.script.Script;

import java.util.Arrays;

/**
 * A set of static factory methods for {@link SortBuilder}s.
 *

@@ -58,8 +61,31 @@ public class SortBuilders {
     * A geo distance based sort.
     *
     * @param fieldName The geo point like field name.
     * @param lat Latitude of the point to create the range distance facets from.
     * @param lon Longitude of the point to create the range distance facets from.
     *
     */
-    public static GeoDistanceSortBuilder geoDistanceSort(String fieldName) {
-        return new GeoDistanceSortBuilder(fieldName);
+    public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, double lat, double lon) {
+        return new GeoDistanceSortBuilder(fieldName, lat, lon);
    }

    /**
     * Constructs a new distance based sort on a geo point like field.
     *
     * @param fieldName The geo point like field name.
     * @param points The points to create the range distance facets from.
     */
    public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, GeoPoint... points) {
        return new GeoDistanceSortBuilder(fieldName, points);
    }

    /**
     * Constructs a new distance based sort on a geo point like field.
     *
     * @param fieldName The geo point like field name.
     * @param geohashes The points to create the range distance facets from.
     */
    public static GeoDistanceSortBuilder geoDistanceSort(String fieldName, String ... geohashes) {
        return new GeoDistanceSortBuilder(fieldName, geohashes);
    }
}
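Putting the factory methods to work, a minimal usage sketch (the field name and coordinates are illustrative; the setters chained here are the ones added to GeoDistanceSortBuilder above):

    import org.elasticsearch.common.geo.GeoPoint;
    import org.elasticsearch.common.unit.DistanceUnit;
    import org.elasticsearch.search.sort.GeoDistanceSortBuilder;
    import org.elasticsearch.search.sort.SortBuilders;
    import org.elasticsearch.search.sort.SortOrder;

    public class GeoSortUsageSketch {
        public static void main(String[] args) {
            // GeoPoint takes (lat, lon); the refactored builder now requires at least one point.
            GeoDistanceSortBuilder sort = SortBuilders
                    .geoDistanceSort("pin.location", new GeoPoint(40.0, -70.0))
                    .unit(DistanceUnit.KILOMETERS)
                    .order(SortOrder.ASC)
                    .sortMode("min"); // "sum" would now throw IllegalArgumentException
            // the builder can then be attached to a search request as usual
            System.out.println(sort.fieldName());
        }
    }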
@@ -0,0 +1,40 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.sort;

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.query.QueryParseContext;

import java.io.IOException;

// TODO once sort refactoring is done this needs to be merged into SortBuilder
public interface SortElementParserTemp<T extends ToXContent> {
    /**
     * Creates a new SortBuilder from the json held by the {@link SortElementParserTemp}
     * in {@link org.elasticsearch.common.xcontent.XContent} format
     *
     * @param context
     *            the input parse context. The state on the parser contained in
     *            this context will be changed as a side effect of this method
     *            call
     * @return the new item
     */
    T fromXContent(QueryParseContext context, String elementName) throws IOException;
}
@@ -51,8 +51,7 @@ public enum SortOrder implements Writeable<SortOrder> {
        }
    };

-    public static final SortOrder DEFAULT = DESC;
-    private static final SortOrder PROTOTYPE = DEFAULT;
+    private static final SortOrder PROTOTYPE = ASC;

    @Override
    public SortOrder readFrom(StreamInput in) throws IOException {
@@ -465,7 +465,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
        }

        @Override
-        public final int hashCode() {
+        protected final int doHashCode() {
            return Objects.hash(discount);
        }

@@ -556,7 +556,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
        }

        @Override
-        public final int hashCode() {
+        protected final int doHashCode() {
            return Objects.hash(alpha);
        }

@@ -636,6 +636,15 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge

        public abstract SmoothingModel innerFromXContent(QueryParseContext parseContext) throws IOException;

        @Override
        public final int hashCode() {
            /*
             * Override hashCode here and forward to an abstract method to force extensions of this class to override hashCode in the same
             * way that we force them to override equals. This also prevents false positives in CheckStyle's EqualsHashCode check.
             */
            return doHashCode();
        }

        public abstract WordScorerFactory buildWordScorerFactory();

        /**

@@ -643,6 +652,8 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
         */
        protected abstract boolean doEquals(SmoothingModel other);

        protected abstract int doHashCode();

        protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException;
    }

@@ -733,7 +744,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
        }

        @Override
-        public final int hashCode() {
+        protected final int doHashCode() {
            return Objects.hash(trigramLambda, bigramLambda, unigramLambda);
        }
@ -0,0 +1,59 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.tasks;

import java.util.concurrent.atomic.AtomicReference;

/**
 * A task that can be canceled
 */
public class CancellableTask extends Task {

private final AtomicReference<String> reason = new AtomicReference<>();

public CancellableTask(long id, String type, String action, String description) {
super(id, type, action, description);
}

public CancellableTask(long id, String type, String action, String description, String parentNode, long parentId) {
super(id, type, action, description, parentNode, parentId);
}

/**
 * This method is called by the task manager when this task is cancelled.
 */
final void cancel(String reason) {
assert reason != null;
this.reason.compareAndSet(null, reason);
}

/**
 * Returns true if this task should be automatically cancelled if the coordinating node that
 * requested this task left the cluster.
 */
public boolean cancelOnParentLeaving() {
return true;
}

public boolean isCancelled() {
return reason.get() != null;
}

}
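Note that cancel(String) only flips an atomic flag; nothing interrupts the running thread. Cancellation is therefore cooperative: long-running operations are expected to poll isCancelled() between units of work and abort themselves, which is exactly what the test actions later in this commit do with awaitBusy. A minimal sketch of that polling pattern (the work queue is an illustrative stand-in, not part of this diff):

    // Sketch: a node-side operation that honours cooperative cancellation.
    static void runAll(CancellableTask task, java.util.Queue<Runnable> work) {
        Runnable step;
        while ((step = work.poll()) != null) {
            if (task.isCancelled()) {
                // the flag was set by the task manager; the operation must bail out itself
                throw new IllegalStateException("task was cancelled");
            }
            step.run();
        }
    }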
@ -22,7 +22,6 @@ package org.elasticsearch.tasks;

import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Provider;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.xcontent.ToXContent;

@ -39,18 +38,18 @@ public class Task {

private final String action;

private final Provider<String> description;
private final String description;

private final String parentNode;

private final long parentId;

public Task(long id, String type, String action, Provider<String> description) {
public Task(long id, String type, String action, String description) {
this(id, type, action, description, null, NO_PARENT_ID);
}

public Task(long id, String type, String action, Provider<String> description, String parentNode, long parentId) {
public Task(long id, String type, String action, String description, String parentNode, long parentId) {
this.id = id;
this.type = type;
this.action = action;

@ -104,7 +103,7 @@ public class Task {
 * Generates task description
 */
public String getDescription() {
return description.get();
return description;
}

/**
@ -19,34 +19,50 @@

package org.elasticsearch.tasks;

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
import org.elasticsearch.transport.TransportRequest;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

/**
 * Task Manager service for keeping track of currently running tasks on the nodes
 */
public class TaskManager extends AbstractComponent {
public class TaskManager extends AbstractComponent implements ClusterStateListener {

private final ConcurrentMapLong<Task> tasks = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency();

private final ConcurrentMapLong<CancellableTaskHolder> cancellableTasks = ConcurrentCollections
.newConcurrentMapLongWithAggressiveConcurrency();

private final AtomicLong taskIdGenerator = new AtomicLong();

private final Map<Tuple<String, Long>, String> banedParents = new ConcurrentHashMap<>();

public TaskManager(Settings settings) {
super(settings);
}

private DiscoveryNodes lastDiscoveryNodes = DiscoveryNodes.EMPTY_NODES;

/**
 * Registers a task without parent task
 * <p>
 * Returns the task manager tracked task or null if the task doesn't support the task manager
 */
public Task register(String type, String action, TransportRequest request) {
Task task = request.createTask(taskIdGenerator.incrementAndGet(), type, action);
@ -54,24 +70,291 @@ public class TaskManager extends AbstractComponent {
if (logger.isTraceEnabled()) {
logger.trace("register {} [{}] [{}] [{}]", task.getId(), type, action, task.getDescription());
}
Task previousTask = tasks.put(task.getId(), task);
assert previousTask == null;

if (task instanceof CancellableTask) {
CancellableTask cancellableTask = (CancellableTask) task;
CancellableTaskHolder holder = new CancellableTaskHolder(cancellableTask);
CancellableTaskHolder oldHolder = cancellableTasks.put(task.getId(), holder);
assert oldHolder == null;
// Check if this task was banned before we start it
if (task.getParentNode() != null && banedParents.isEmpty() == false) {
String reason = banedParents.get(new Tuple<>(task.getParentNode(), task.getParentId()));
if (reason != null) {
try {
holder.cancel(reason);
throw new IllegalStateException("Task cancelled before it started: " + reason);
} finally {
// let's clean up the registration
unregister(task);
}
}
}
} else {
Task previousTask = tasks.put(task.getId(), task);
assert previousTask == null;
}

}
return task;
}

/**
 * Cancels a task
 * <p>
 * Returns a set of nodes with child tasks where this task should be cancelled if cancellation was successful, null otherwise.
 */
public Set<String> cancel(CancellableTask task, String reason, Consumer<Set<String>> listener) {
CancellableTaskHolder holder = cancellableTasks.get(task.getId());
if (holder != null) {
logger.trace("cancelling task with id {}", task.getId());
return holder.cancel(reason, listener);
}
return null;
}

/**
 * Unregister the task
 */
public Task unregister(Task task) {
logger.trace("unregister task for id: {}", task.getId());
return tasks.remove(task.getId());
if (task instanceof CancellableTask) {
CancellableTaskHolder holder = cancellableTasks.remove(task.getId());
if (holder != null) {
holder.finish();
return holder.getTask();
} else {
return null;
}
} else {
return tasks.remove(task.getId());
}
}
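register(...) and unregister(...) are meant to bracket the processing of a request: a handler registers the task before doing any work and must unregister it on every exit path, since for cancellable tasks unregister(...) is also what fires a pending cancellation listener via holder.finish(). A hedged sketch of the expected call pattern (processRequest and actionName are placeholders, not names from this diff):

    // Sketch: how a transport handler is expected to drive the task lifecycle.
    Task task = taskManager.register("transport", actionName, request);
    if (task == null) {
        // the request opted out of tracking by returning null from createTask()
        processRequest(request);            // hypothetical handler
    } else {
        try {
            processRequest(request, task);  // pass the task so the work can poll isCancelled()
        } finally {
            taskManager.unregister(task);   // always unregister, even on failure
        }
    }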
/**
 * Returns the list of currently running tasks on the node
 */
public Map<Long, Task> getTasks() {
return Collections.unmodifiableMap(new HashMap<>(tasks));
HashMap<Long, Task> taskHashMap = new HashMap<>(this.tasks);
for (CancellableTaskHolder holder : cancellableTasks.values()) {
taskHashMap.put(holder.getTask().getId(), holder.getTask());
}
return Collections.unmodifiableMap(taskHashMap);
}

/**
 * Returns the list of currently running tasks on the node that can be cancelled
 */
public Map<Long, CancellableTask> getCancellableTasks() {
HashMap<Long, CancellableTask> taskHashMap = new HashMap<>();
for (CancellableTaskHolder holder : cancellableTasks.values()) {
taskHashMap.put(holder.getTask().getId(), holder.getTask());
}
return Collections.unmodifiableMap(taskHashMap);
}

/**
 * Returns a task with given id, or null if the task is not found.
 */
public Task getTask(long id) {
Task task = tasks.get(id);
if (task != null) {
return task;
} else {
return getCancellableTask(id);
}
}

/**
 * Returns a cancellable task with given id, or null if the task is not found.
 */
public CancellableTask getCancellableTask(long id) {
CancellableTaskHolder holder = cancellableTasks.get(id);
if (holder != null) {
return holder.getTask();
} else {
return null;
}
}

/**
 * Returns the number of currently banned tasks.
 * <p>
 * Will be used in task manager stats and for debugging.
 */
public int getBanCount() {
return banedParents.size();
}
/**
 * Bans all tasks with the specified parent task from execution, cancels all tasks that are currently executing.
 * <p>
 * This method is called when a parent task that has children is cancelled.
 */
public void setBan(String parentNode, long parentId, String reason) {
logger.trace("setting ban for the parent task {}:{} {}", parentNode, parentId, reason);

// Set the ban first, so the newly created tasks cannot be registered
Tuple<String, Long> ban = new Tuple<>(parentNode, parentId);
synchronized (banedParents) {
if (lastDiscoveryNodes.nodeExists(parentNode)) {
// Only set the ban if the node is part of the cluster
banedParents.put(ban, reason);
}
}

// Now go through already running tasks and cancel them
for (Map.Entry<Long, CancellableTaskHolder> taskEntry : cancellableTasks.entrySet()) {
CancellableTaskHolder holder = taskEntry.getValue();
if (holder.hasParent(parentNode, parentId)) {
holder.cancel(reason);
}
}
}

/**
 * Removes the ban for the specified parent task.
 * <p>
 * This method is called when a previously banned task is finally cancelled
 */
public void removeBan(String parentNode, long parentId) {
logger.trace("removing ban for the parent task {}:{}", parentNode, parentId);
banedParents.remove(new Tuple<>(parentNode, parentId));
}
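setBan(...)/removeBan(...) give the cancellation of a parent with children two phases on each node that runs child tasks: the ban blocks new children from registering and cancels the ones already running, then the ban is lifted once the parent is done. In this commit the calls are driven remotely by the cancel-tasks transport action; a local sketch of the order of operations on one such node (taskManager, parentNode and parentId are placeholders):

    // Sketch of the per-node ban window around cancelling a parent that has children here.
    taskManager.setBan(parentNode, parentId, "cancelled by user");
    // ... between these calls no new child of parentNode:parentId can start on this node,
    // and already running children have been asked to cancel ...
    taskManager.removeBan(parentNode, parentId);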
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.nodesRemoved()) {
synchronized (banedParents) {
lastDiscoveryNodes = event.state().getNodes();
// Remove all bans that were registered by nodes that are no longer in the cluster state
Iterator<Tuple<String, Long>> banIterator = banedParents.keySet().iterator();
while (banIterator.hasNext()) {
Tuple<String, Long> nodeAndTaskId = banIterator.next();
String nodeId = nodeAndTaskId.v1();
Long taskId = nodeAndTaskId.v2();
if (lastDiscoveryNodes.nodeExists(nodeId) == false) {
logger.debug("Removing ban for the parent [{}:{}] on the node [{}], reason: the parent node is gone", nodeId,
taskId, event.state().getNodes().localNode());
banIterator.remove();
}
}
}
// Cancel cancellable tasks for the nodes that are gone
for (Map.Entry<Long, CancellableTaskHolder> taskEntry : cancellableTasks.entrySet()) {
CancellableTaskHolder holder = taskEntry.getValue();
CancellableTask task = holder.getTask();
String parent = task.getParentNode();
if (parent != null && lastDiscoveryNodes.nodeExists(parent) == false) {
if (task.cancelOnParentLeaving()) {
holder.cancel("Coordinating node [" + parent + "] left the cluster");
}
}
}
}
}

public void registerChildTask(Task task, String node) {
if (task == null || task instanceof CancellableTask == false) {
// We don't have a cancellable task - not much we can do here
return;
}
CancellableTaskHolder holder = cancellableTasks.get(task.getId());
if (holder != null) {
holder.registerChildTaskNode(node);
}
}
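registerChildTask(...) is what makes the fan-out work: before sending a child request to another node, the coordinating code records that node against the parent task, so a later cancel(...) can return exactly the set of nodes that need to be told. A hedged sketch (childNodeId and sendChildRequest are placeholders):

    // Sketch: record the child's node before dispatching, so cancellation can follow it.
    taskManager.registerChildTask(parentTask, childNodeId);
    sendChildRequest(childNodeId, childRequest);  // hypothetical transport call

If the parent was already cancelled, registerChildTaskNode(...) in the holder below throws instead of recording the node, which prevents children from being spawned after the fact.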
private static class CancellableTaskHolder {

private static final String TASK_FINISHED_MARKER = "task finished";

private final CancellableTask task;

private final Set<String> nodesWithChildTasks = new HashSet<>();

private volatile String cancellationReason = null;

private volatile Consumer<Set<String>> cancellationListener = null;

public CancellableTaskHolder(CancellableTask task) {
this.task = task;
}

/**
 * Marks task as cancelled.
 * <p>
 * Returns a set of nodes with child tasks where this task should be cancelled if cancellation was successful, null otherwise.
 */
public Set<String> cancel(String reason, Consumer<Set<String>> listener) {
Set<String> nodes;
synchronized (this) {
assert reason != null;
if (cancellationReason == null) {
cancellationReason = reason;
cancellationListener = listener;
nodes = Collections.unmodifiableSet(nodesWithChildTasks);
} else {
// Already cancelled by somebody else
nodes = null;
}
}
if (nodes != null) {
task.cancel(reason);
}
return nodes;
}

/**
 * Marks task as cancelled.
 * <p>
 * Returns a set of nodes with child tasks where this task should be cancelled if cancellation was successful, null otherwise.
 */
public Set<String> cancel(String reason) {
return cancel(reason, null);
}

/**
 * Marks task as finished.
 */
public void finish() {
Consumer<Set<String>> listener = null;
Set<String> nodes = null;
synchronized (this) {
if (cancellationReason != null) {
// The task was cancelled, we need to notify the listener
if (cancellationListener != null) {
listener = cancellationListener;
nodes = Collections.unmodifiableSet(nodesWithChildTasks);
cancellationListener = null;
}
} else {
cancellationReason = TASK_FINISHED_MARKER;
}
}
// We need to call the listener outside of the synchronized section to avoid potential bottlenecks
// in the listener synchronization
if (listener != null) {
listener.accept(nodes);
}

}

public boolean hasParent(String parentNode, long parentId) {
return parentId == task.getParentId() && parentNode.equals(task.getParentNode());
}

public CancellableTask getTask() {
return task;
}

public synchronized void registerChildTaskNode(String nodeId) {
if (cancellationReason == null) {
nodesWithChildTasks.add(nodeId);
} else {
throw new IllegalStateException("cannot register child task request, the task is already cancelled");
}
}
}
}

@ -33,14 +33,20 @@ public abstract class TransportRequest extends TransportMessage<TransportRequest
}

/**
 * Returns the task object that should be used to keep track of the processing of the request.
 *
 * A request can override this method and return null to avoid being tracked by the task manager.
 */
public Task createTask(long id, String type, String action) {
return new Task(id, type, action, this::getDescription);
return new Task(id, type, action, getDescription());
}

/**
 * Returns an optional description of the request to be displayed by the task manager
 */
public String getDescription() {
return this.toString();
return "";
}

}
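The default createTask(...) above returns a plain, non-cancellable Task. A request opts in to cancellation by overriding it to return a CancellableTask instead, which is exactly what the test requests later in this commit do. A minimal sketch (BlockingTestRequest is a hypothetical name):

    // Sketch: a request that opts in to cancellation (mirrors the test requests below).
    public class BlockingTestRequest extends TransportRequest {
        @Override
        public Task createTask(long id, String type, String action) {
            return new CancellableTask(id, type, action, getDescription());
        }

        @Override
        public String getDescription() {
            return "blocking test request";  // surfaced by the task manager / list tasks API
        }
    }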
@ -0,0 +1,384 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.admin.cluster.node.tasks;

import com.carrotsearch.randomizedtesting.RandomizedContext;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

public class CancellableTasksTests extends TaskManagerTestCase {

public static class CancellableNodeRequest extends BaseNodeRequest {
protected String requestName;
protected String nodeId;

public CancellableNodeRequest() {
super();
}

public CancellableNodeRequest(CancellableNodesRequest request, String nodeId) {
super(nodeId);
requestName = request.requestName;
this.nodeId = nodeId;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
requestName = in.readString();
nodeId = in.readString();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(requestName);
out.writeString(nodeId);
}

@Override
public String getDescription() {
return "CancellableNodeRequest[" + requestName + ", " + nodeId + "]";
}

@Override
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
return new CancellableTask(id, type, action, getDescription(), parentTaskNode, parentTaskId);
}
}

public static class CancellableNodesRequest extends BaseNodesRequest<CancellableNodesRequest> {
private String requestName;

private CancellableNodesRequest() {
super();
}

public CancellableNodesRequest(String requestName, String... nodesIds) {
super(nodesIds);
this.requestName = requestName;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
requestName = in.readString();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(requestName);
}

@Override
public String getDescription() {
return "CancellableNodesRequest[" + requestName + "]";
}

@Override
public Task createTask(long id, String type, String action) {
return new CancellableTask(id, type, action, getDescription());
}
}

/**
 * Simulates a cancellable node-based task that can be used to block node tasks so they are guaranteed to be registered by the task manager
 */
class CancellableTestNodesAction extends AbstractTestNodesAction<CancellableNodesRequest, CancellableNodeRequest> {

// True if the node operation should get stuck until it is cancelled
final boolean shouldBlock;

final CountDownLatch actionStartedLatch;

CancellableTestNodesAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService, boolean shouldBlock, CountDownLatch
actionStartedLatch) {
super(settings, actionName, clusterName, threadPool, clusterService, transportService, CancellableNodesRequest::new,
CancellableNodeRequest::new);
this.shouldBlock = shouldBlock;
this.actionStartedLatch = actionStartedLatch;
}

@Override
protected CancellableNodeRequest newNodeRequest(String nodeId, CancellableNodesRequest request) {
return new CancellableNodeRequest(request, nodeId);
}

@Override
protected NodeResponse nodeOperation(CancellableNodeRequest request, Task task) {
assert task instanceof CancellableTask;
debugDelay(request.nodeId, "op1");
if (actionStartedLatch != null) {
actionStartedLatch.countDown();
}

debugDelay(request.nodeId, "op2");
if (shouldBlock) {
// Simulate a job that takes forever to finish
// Using periodic checks method to identify that the task was cancelled
try {
awaitBusy(() -> {
if (((CancellableTask) task).isCancelled()) {
throw new RuntimeException("Cancelled");
}
return false;
});
fail("It should have thrown an exception");
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
debugDelay(request.nodeId, "op4");

return new NodeResponse(clusterService.localNode());
}

@Override
protected NodeResponse nodeOperation(CancellableNodeRequest request) {
throw new UnsupportedOperationException("the task parameter is required");
}
}

private Task startCancellableTestNodesAction(boolean waitForActionToStart, int blockedNodesCount, ActionListener<NodesResponse>
listener) throws InterruptedException {
return startCancellableTestNodesAction(waitForActionToStart, randomSubsetOf(blockedNodesCount, testNodes), new
CancellableNodesRequest("Test Request"), listener);
}

private Task startCancellableTestNodesAction(boolean waitForActionToStart, Collection<TestNode> blockOnNodes, CancellableNodesRequest
request, ActionListener<NodesResponse> listener) throws InterruptedException {
CountDownLatch actionLatch = waitForActionToStart ? new CountDownLatch(nodesCount) : null;
CancellableTestNodesAction[] actions = new CancellableTestNodesAction[nodesCount];
for (int i = 0; i < testNodes.length; i++) {
boolean shouldBlock = blockOnNodes.contains(testNodes[i]);
logger.info("The action in the node [{}] should block: [{}]", testNodes[i].discoveryNode.getId(), shouldBlock);
actions[i] = new CancellableTestNodesAction(Settings.EMPTY, "testAction", clusterName, threadPool, testNodes[i]
.clusterService, testNodes[i].transportService, shouldBlock, actionLatch);
}
Task task = actions[0].execute(request, listener);
if (waitForActionToStart) {
logger.info("Waiting for all actions to start");
actionLatch.await();
logger.info("Done waiting for all actions to start");
}
return task;
}

public void testBasicTaskCancellation() throws Exception {
setupTestNodes(Settings.EMPTY);
connectNodes(testNodes);
CountDownLatch responseLatch = new CountDownLatch(1);
boolean waitForActionToStart = randomBoolean();
logger.info("waitForActionToStart is set to {}", waitForActionToStart);
final AtomicReference<NodesResponse> responseReference = new AtomicReference<>();
final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
int blockedNodesCount = randomIntBetween(0, nodesCount);
Task mainTask = startCancellableTestNodesAction(waitForActionToStart, blockedNodesCount, new ActionListener<NodesResponse>() {
@Override
public void onResponse(NodesResponse listTasksResponse) {
responseReference.set(listTasksResponse);
responseLatch.countDown();
}

@Override
public void onFailure(Throwable e) {
throwableReference.set(e);
responseLatch.countDown();
}
});

// Cancel main task
CancelTasksRequest request = new CancelTasksRequest(testNodes[0].discoveryNode.getId());
request.reason("Testing Cancellation");
request.taskId(mainTask.getId());
// And send the cancellation request to a random node
CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request)
.get();

// Wait for the main task to finish
responseLatch.await();

if (response.getTasks().size() == 0) {
// We didn't cancel the request and it finished successfully
// That should be rare and can happen only if we didn't block on a single node
assertEquals(0, blockedNodesCount);
// Make sure that the request was successful
assertNull(throwableReference.get());
assertNotNull(responseReference.get());
assertEquals(nodesCount, responseReference.get().getNodes().length);
assertEquals(0, responseReference.get().failureCount());
} else {
// We cancelled the request; in this case it should have failed, but we should get a partial response
assertNull(throwableReference.get());
assertEquals(nodesCount, responseReference.get().failureCount() + responseReference.get().getNodes().length);
// and we should have at least as many failures as the number of blocked operations
// (we might have cancelled some non-blocked operations before they even started and that's ok)
assertThat(responseReference.get().failureCount(), greaterThanOrEqualTo(blockedNodesCount));

// We should have the information about the cancelled task in the cancel operation response
assertEquals(1, response.getTasks().size());
assertEquals(mainTask.getId(), response.getTasks().get(0).getId());
}

// Make sure that tasks are no longer running
ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest(testNodes[0].discoveryNode.getId()).taskId(mainTask.getId())).get();
assertEquals(0, listTasksResponse.getTasks().size());

// Make sure that there are no leftover bans, the ban removal is async, so we might return from the cancellation
// while the ban is still there, but it should disappear shortly
assertBusy(() -> {
for (int i = 0; i < testNodes.length; i++) {
assertEquals("No bans on the node " + i, 0, testNodes[i].transportService.getTaskManager().getBanCount());
}
});
}

public void testTaskCancellationOnCoordinatingNodeLeavingTheCluster() throws Exception {
setupTestNodes(Settings.EMPTY);
connectNodes(testNodes);
CountDownLatch responseLatch = new CountDownLatch(1);
boolean simulateBanBeforeLeaving = randomBoolean();
final AtomicReference<NodesResponse> responseReference = new AtomicReference<>();
final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
int blockedNodesCount = randomIntBetween(0, nodesCount - 1);

// We shouldn't block on the first node since it's leaving the cluster anyway so it doesn't matter
List<TestNode> blockOnNodes = randomSubsetOf(blockedNodesCount, Arrays.copyOfRange(testNodes, 1, nodesCount));
Task mainTask = startCancellableTestNodesAction(true, blockOnNodes, new CancellableNodesRequest("Test Request"), new
ActionListener<NodesResponse>() {
@Override
public void onResponse(NodesResponse listTasksResponse) {
responseReference.set(listTasksResponse);
responseLatch.countDown();
}

@Override
public void onFailure(Throwable e) {
throwableReference.set(e);
responseLatch.countDown();
}
});

String mainNode = testNodes[0].discoveryNode.getId();

// Make sure that tasks are running
ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest().parentNode(mainNode).taskId(mainTask.getId())).get();
assertThat(listTasksResponse.getTasks().size(), greaterThanOrEqualTo(blockOnNodes.size()));

// Simulate the coordinating node leaving the cluster
DiscoveryNode[] discoveryNodes = new DiscoveryNode[testNodes.length - 1];
for (int i = 1; i < testNodes.length; i++) {
discoveryNodes[i - 1] = testNodes[i].discoveryNode;
}
DiscoveryNode master = discoveryNodes[0];
for (int i = 1; i < testNodes.length; i++) {
// Notify only nodes that should remain in the cluster
testNodes[i].clusterService.setState(ClusterStateCreationUtils.state(testNodes[i].discoveryNode, master, discoveryNodes));
}

if (simulateBanBeforeLeaving) {
logger.info("--> Simulate issuing cancel request on the node that is about to leave the cluster");
// Simulate issuing cancel request on the node that is about to leave the cluster
CancelTasksRequest request = new CancelTasksRequest(testNodes[0].discoveryNode.getId());
request.reason("Testing Cancellation");
request.taskId(mainTask.getId());
// And send the cancellation request to the node that is about to leave the cluster
CancelTasksResponse response = testNodes[0].transportCancelTasksAction.execute(request).get();
logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster");
// This node still thinks it is part of the cluster, so cancelling should look successful
if (response.getTasks().size() == 0) {
logger.error("!!!!");
}
assertThat(response.getTasks().size(), lessThanOrEqualTo(1));
assertThat(response.getTaskFailures().size(), lessThanOrEqualTo(1));
assertThat(response.getTaskFailures().size() + response.getTasks().size(), lessThanOrEqualTo(1));
}

for (int i = 1; i < testNodes.length; i++) {
assertEquals("No bans on the node " + i, 0, testNodes[i].transportService.getTaskManager().getBanCount());
}

// Close the first node
testNodes[0].close();

assertBusy(() -> {
// Make sure that tasks are no longer running
try {
ListTasksResponse listTasksResponse1 = testNodes[randomIntBetween(1, testNodes.length - 1)]
.transportListTasksAction.execute(new ListTasksRequest().parentNode(mainNode).taskId(mainTask.getId())).get();
assertEquals(0, listTasksResponse1.getTasks().size());
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
} catch (ExecutionException ex2) {
fail("shouldn't be here");
}
});

// Wait for clean up
responseLatch.await();

}

private static void debugDelay(String nodeId, String name) {
// Introduce additional pseudo-random but repeatable race conditions
String delayName = RandomizedContext.current().getRunnerSeedAsString() + ":" + nodeId + ":" + name;
Random random = new Random(delayName.hashCode());
if (RandomInts.randomIntBetween(random, 0, 10) < 1) {
try {
Thread.sleep(RandomInts.randomIntBetween(random, 20, 50));
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
}
}
}

}
@ -0,0 +1,245 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.admin.cluster.node.tasks;

import org.elasticsearch.Version;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService;
import org.elasticsearch.test.tasks.MockTaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.BeforeClass;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.Supplier;

/**
 * The test case for unit testing the task manager and related transport actions
 */
public abstract class TaskManagerTestCase extends ESTestCase {

protected static ThreadPool threadPool;
public static final ClusterName clusterName = new ClusterName("test-cluster");
protected TestNode[] testNodes;
protected int nodesCount;

@BeforeClass
public static void beforeClass() {
threadPool = new ThreadPool(TransportTasksActionTests.class.getSimpleName());
}

@AfterClass
public static void afterClass() {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
threadPool = null;
}

public void setupTestNodes(Settings settings) {
nodesCount = randomIntBetween(2, 10);
testNodes = new TestNode[nodesCount];
for (int i = 0; i < testNodes.length; i++) {
testNodes[i] = new TestNode("node" + i, threadPool, settings);
}
}

@After
public final void shutdownTestNodes() throws Exception {
for (TestNode testNode : testNodes) {
testNode.close();
}
}

static class NodeResponse extends BaseNodeResponse {

protected NodeResponse() {
super();
}

protected NodeResponse(DiscoveryNode node) {
super(node);
}
}

static class NodesResponse extends BaseNodesResponse<NodeResponse> {

private int failureCount;

protected NodesResponse(ClusterName clusterName, NodeResponse[] nodes, int failureCount) {
super(clusterName, nodes);
this.failureCount = failureCount;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
failureCount = in.readVInt();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(failureCount);
}

public int failureCount() {
return failureCount;
}
}

/**
 * Simulates a node-based task that can be used to block node tasks so they are guaranteed to be registered by the task manager
 */
abstract class AbstractTestNodesAction<NodesRequest extends BaseNodesRequest<NodesRequest>, NodeRequest extends BaseNodeRequest>
extends TransportNodesAction<NodesRequest, NodesResponse, NodeRequest, NodeResponse> {

AbstractTestNodesAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService, Supplier<NodesRequest> request,
Supplier<NodeRequest> nodeRequest) {
super(settings, actionName, clusterName, threadPool, clusterService, transportService,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
request, nodeRequest, ThreadPool.Names.GENERIC);
}

@Override
protected NodesResponse newResponse(NodesRequest request, AtomicReferenceArray responses) {
final List<NodeResponse> nodesList = new ArrayList<>();
int failureCount = 0;
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
if (resp instanceof NodeResponse) { // will also filter out null response for unallocated ones
nodesList.add((NodeResponse) resp);
} else if (resp instanceof FailedNodeException) {
failureCount++;
} else {
logger.warn("unknown response type [{}], expected NodeLocalGatewayMetaState or FailedNodeException", resp);
}
}
return new NodesResponse(clusterName, nodesList.toArray(new NodeResponse[nodesList.size()]), failureCount);
}

@Override
protected NodeResponse newNodeResponse() {
return new NodeResponse();
}

@Override
protected abstract NodeResponse nodeOperation(NodeRequest request);

@Override
protected boolean accumulateExceptions() {
return true;
}
}

public static class TestNode implements Releasable {
public TestNode(String name, ThreadPool threadPool, Settings settings) {
transportService = new TransportService(settings,
new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry()),
threadPool, new NamedWriteableRegistry()) {
@Override
protected TaskManager createTaskManager() {
if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) {
return new MockTaskManager(settings);
} else {
return super.createTaskManager();
}
}
};
transportService.start();
clusterService = new TestClusterService(threadPool, transportService);
clusterService.add(transportService.getTaskManager());
discoveryNode = new DiscoveryNode(name, transportService.boundAddress().publishAddress(), Version.CURRENT);
IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings);
ActionFilters actionFilters = new ActionFilters(Collections.emptySet());
transportListTasksAction = new TransportListTasksAction(settings, clusterName, threadPool, clusterService, transportService,
actionFilters, indexNameExpressionResolver);
transportCancelTasksAction = new TransportCancelTasksAction(settings, clusterName, threadPool, clusterService, transportService,
actionFilters, indexNameExpressionResolver);
}

public final TestClusterService clusterService;
public final TransportService transportService;
public final DiscoveryNode discoveryNode;
public final TransportListTasksAction transportListTasksAction;
public final TransportCancelTasksAction transportCancelTasksAction;

@Override
public void close() {
transportService.close();
}
}

public static void connectNodes(TestNode... nodes) {
DiscoveryNode[] discoveryNodes = new DiscoveryNode[nodes.length];
for (int i = 0; i < nodes.length; i++) {
discoveryNodes[i] = nodes[i].discoveryNode;
}
DiscoveryNode master = discoveryNodes[0];
for (TestNode node : nodes) {
node.clusterService.setState(ClusterStateCreationUtils.state(node.discoveryNode, master, discoveryNodes));
}
for (TestNode nodeA : nodes) {
for (TestNode nodeB : nodes) {
nodeA.transportService.connectToNode(nodeB.discoveryNode);
}
}
}

public static RecordingTaskManagerListener[] setupListeners(TestNode[] nodes, String... actionMasks) {
RecordingTaskManagerListener[] listeners = new RecordingTaskManagerListener[nodes.length];
for (int i = 0; i < nodes.length; i++) {
listeners[i] = new RecordingTaskManagerListener(nodes[i].discoveryNode, actionMasks);
((MockTaskManager) (nodes[i].clusterService.getTaskManager())).addListener(listeners[i]);
}
return listeners;
}

}
@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks;

import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;

@ -68,7 +69,12 @@ public class TasksIT extends ESIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return pluginList(MockTransportService.TestPlugin.class);
return pluginList(MockTransportService.TestPlugin.class, TestTaskPlugin.class);
}

@Override
protected Collection<Class<? extends Plugin>> transportClientPlugins() {
return nodePlugins();
}

@Override

@ -279,6 +285,39 @@ public class TasksIT extends ESIntegTestCase {
}
}

public void testTasksCancellation() throws Exception {
// Start blocking test task
// Get real client (the plugin is not registered on transport nodes)
ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()).execute();
logger.info("--> started test tasks");

// Wait for the task to start on all nodes
assertBusy(() -> assertEquals(internalCluster().numDataAndMasterNodes(),
client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()));

logger.info("--> cancelling the main test task");
CancelTasksResponse cancelTasksResponse = client().admin().cluster().prepareCancelTasks().setActions(TestTaskPlugin.TestTaskAction.NAME).get();
assertEquals(1, cancelTasksResponse.getTasks().size());

future.get();

logger.info("--> checking that test tasks are not running");
assertEquals(0, client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "*").get().getTasks().size());

}

public void testTasksUnblocking() throws Exception {
// Start blocking test task
ListenableActionFuture<TestTaskPlugin.NodesResponse> future = TestTaskPlugin.TestTaskAction.INSTANCE.newRequestBuilder(client()).execute();
// Wait for the task to start on all nodes
assertBusy(() -> assertEquals(internalCluster().numDataAndMasterNodes(),
client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size()));

TestTaskPlugin.UnblockTestTasksAction.INSTANCE.newRequestBuilder(client()).get();

future.get();
assertEquals(0, client().admin().cluster().prepareListTasks().setActions(TestTaskPlugin.TestTaskAction.NAME + "[n]").get().getTasks().size());
}

@Override
public void tearDown() throws Exception {
@ -0,0 +1,454 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.admin.cluster.node.tasks;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionModule;
|
||||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.action.FailedNodeException;
|
||||
import org.elasticsearch.action.TaskOperationFailure;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
|
||||
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
|
||||
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
|
||||
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
|
||||
import org.elasticsearch.action.support.nodes.TransportNodesAction;
|
||||
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
|
||||
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
|
||||
import org.elasticsearch.action.support.tasks.TransportTasksAction;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.tasks.CancellableTask;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicReferenceArray;
|
||||
|
||||
import static org.elasticsearch.test.ESTestCase.awaitBusy;
|
||||
|
||||
/**
|
||||
* A plugin that adds a cancellable blocking test task of integration testing of the task manager.
|
||||
*/
|
||||
public class TestTaskPlugin extends Plugin {
|
||||
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return "test-task-plugin";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String description() {
|
||||
return "Test plugin for testing task management";
|
||||
}
|
||||
|
||||
public void onModule(ActionModule module) {
|
||||
module.registerAction(TestTaskAction.INSTANCE, TransportTestTaskAction.class);
|
||||
module.registerAction(UnblockTestTasksAction.INSTANCE, TransportUnblockTestTasksAction.class);
|
||||
}
|
||||
|
||||
static class TestTask extends CancellableTask {
|
||||
|
||||
private volatile boolean blocked = true;
|
||||
|
||||
public TestTask(long id, String type, String action, String description, String parentNode, long parentId) {
|
||||
super(id, type, action, description, parentNode, parentId);
|
||||
}
|
||||
|
||||
public boolean isBlocked() {
|
||||
return blocked;
|
||||
}
|
||||
|
||||
public void unblock() {
|
||||
blocked = false;
|
||||
}
|
||||
}
|
||||
|
||||
public static class NodeResponse extends BaseNodeResponse {
|
||||
|
||||
protected NodeResponse() {
|
||||
super();
|
||||
}
|
||||
|
||||
public NodeResponse(DiscoveryNode node) {
|
||||
super(node);
|
||||
}
|
||||
}
|
||||
|
||||
public static class NodesResponse extends BaseNodesResponse<NodeResponse> {
|
||||
|
||||
private int failureCount;
|
||||
|
||||
NodesResponse() {
|
||||
|
||||
}
|
||||
|
||||
public NodesResponse(ClusterName clusterName, NodeResponse[] nodes, int failureCount) {
|
||||
super(clusterName, nodes);
|
||||
this.failureCount = failureCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
failureCount = in.readVInt();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeVInt(failureCount);
|
||||
}
|
||||
|
||||
public int failureCount() {
|
||||
return failureCount;
|
||||
}
|
||||
}
|
||||
|
||||
public static class NodeRequest extends BaseNodeRequest {
|
||||
protected String requestName;
|
||||
protected String nodeId;
|
||||
|
||||
public NodeRequest() {
|
||||
super();
|
||||
}
|
||||
|
||||
public NodeRequest(NodesRequest request, String nodeId) {
|
||||
super(nodeId);
|
||||
requestName = request.requestName;
|
||||
this.nodeId = nodeId;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
requestName = in.readString();
|
||||
nodeId = in.readString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(requestName);
|
||||
out.writeString(nodeId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDescription() {
|
||||
return "NodeRequest[" + requestName + ", " + nodeId + "]";
|
||||
}
|
||||
|
||||
@Override
|
||||
public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
|
||||
return new TestTask(id, type, action, this.getDescription(), parentTaskNode, parentTaskId);
|
||||
}
|
||||
}
|
||||
|
||||
public static class NodesRequest extends BaseNodesRequest<NodesRequest> {
|
||||
private String requestName;
|
||||
|
||||
NodesRequest() {
|
||||
super();
|
||||
}
|
||||
|
||||
public NodesRequest(String requestName, String... nodesIds) {
|
||||
super(nodesIds);
|
||||
this.requestName = requestName;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
requestName = in.readString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeString(requestName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getDescription() {
|
||||
return "NodesRequest[" + requestName + "]";
|
||||
}
|
||||
|
||||
@Override
|
||||
public Task createTask(long id, String type, String action) {
|
||||
return new CancellableTask(id, type, action, getDescription());
|
||||
}
|
||||
}
    public static class TransportTestTaskAction extends TransportNodesAction<NodesRequest, NodesResponse, NodeRequest, NodeResponse> {

        @Inject
        public TransportTestTaskAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
                                       ClusterService clusterService, TransportService transportService) {
            super(settings, TestTaskAction.NAME, clusterName, threadPool, clusterService, transportService,
                new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
                NodesRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC);
        }

        @Override
        protected NodesResponse newResponse(NodesRequest request, AtomicReferenceArray responses) {
            final List<NodeResponse> nodesList = new ArrayList<>();
            int failureCount = 0;
            for (int i = 0; i < responses.length(); i++) {
                Object resp = responses.get(i);
                if (resp instanceof NodeResponse) { // will also filter out null response for unallocated ones
                    nodesList.add((NodeResponse) resp);
                } else if (resp instanceof FailedNodeException) {
                    failureCount++;
                } else {
                    logger.warn("unknown response type [{}], expected NodeLocalGatewayMetaState or FailedNodeException", resp);
                }
            }
            return new NodesResponse(clusterName, nodesList.toArray(new NodeResponse[nodesList.size()]), failureCount);
        }

        @Override
        protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
            List<String> list = new ArrayList<>();
            for (String node : nodesIds) {
                if (nodes.getDataNodes().containsKey(node)) {
                    list.add(node);
                }
            }
            return list.toArray(new String[list.size()]);
        }

        @Override
        protected NodeRequest newNodeRequest(String nodeId, NodesRequest request) {
            return new NodeRequest(request, nodeId);
        }

        @Override
        protected NodeResponse newNodeResponse() {
            return new NodeResponse();
        }

        @Override
        protected void doExecute(Task task, NodesRequest request, ActionListener<NodesResponse> listener) {
            super.doExecute(task, request, listener);
        }

        @Override
        protected NodeResponse nodeOperation(NodeRequest request, Task task) {
            logger.info("Test task started on the node {}", clusterService.localNode());
            try {
                awaitBusy(() -> {
                    if (((CancellableTask) task).isCancelled()) {
                        throw new RuntimeException("Cancelled!");
                    }
                    return ((TestTask) task).isBlocked() == false;
                });
            } catch (InterruptedException ex) {
                Thread.currentThread().interrupt();
            }
            logger.info("Test task finished on the node {}", clusterService.localNode());
            return new NodeResponse(clusterService.localNode());
        }

        @Override
        protected NodeResponse nodeOperation(NodeRequest request) {
            throw new UnsupportedOperationException("the task parameter is required");
        }

        @Override
        protected boolean accumulateExceptions() {
            return true;
        }
    }
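
Note: nodeOperation above parks each node request in a busy-wait until the task is unblocked or cancelled. A self-contained sketch of that control flow using plain Java primitives (awaitBusy, TestTask and CancellableTask are Elasticsearch test utilities; the flags below are stand-ins):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class BlockedNodeOperation {
    private final AtomicBoolean cancelled = new AtomicBoolean();
    private final AtomicBoolean blocked = new AtomicBoolean(true);

    // Poll until unblocked; abort promptly once cancellation is observed.
    void run() throws InterruptedException {
        while (blocked.get()) {
            if (cancelled.get()) {
                throw new RuntimeException("Cancelled!");
            }
            TimeUnit.MILLISECONDS.sleep(10); // back off between checks
        }
    }

    void unblock() { blocked.set(false); }

    void cancel() { cancelled.set(true); }
}
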
    public static class TestTaskAction extends Action<NodesRequest, NodesResponse, NodesRequestBuilder> {

        public static final TestTaskAction INSTANCE = new TestTaskAction();
        public static final String NAME = "cluster:admin/tasks/test";

        private TestTaskAction() {
            super(NAME);
        }

        @Override
        public NodesResponse newResponse() {
            return new NodesResponse();
        }

        @Override
        public NodesRequestBuilder newRequestBuilder(ElasticsearchClient client) {
            return new NodesRequestBuilder(client, this);
        }
    }

    public static class NodesRequestBuilder extends ActionRequestBuilder<NodesRequest, NodesResponse, NodesRequestBuilder> {

        protected NodesRequestBuilder(ElasticsearchClient client, Action<NodesRequest, NodesResponse, NodesRequestBuilder> action) {
            super(client, action, new NodesRequest("test"));
        }
    }


    public static class UnblockTestTaskResponse implements Writeable<UnblockTestTaskResponse> {

        public UnblockTestTaskResponse() {
        }

        public UnblockTestTaskResponse(StreamInput in) {
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
        }

        @Override
        public UnblockTestTaskResponse readFrom(StreamInput in) throws IOException {
            return new UnblockTestTaskResponse(in);
        }
    }


    public static class UnblockTestTasksRequest extends BaseTasksRequest<UnblockTestTasksRequest> {

    }

    public static class UnblockTestTasksResponse extends BaseTasksResponse {

        private List<UnblockTestTaskResponse> tasks;

        public UnblockTestTasksResponse() {
        }

        public UnblockTestTasksResponse(List<UnblockTestTaskResponse> tasks, List<TaskOperationFailure> taskFailures,
                                        List<? extends FailedNodeException> nodeFailures) {
            super(taskFailures, nodeFailures);
            this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks));
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            int taskCount = in.readVInt();
            List<UnblockTestTaskResponse> builder = new ArrayList<>();
            for (int i = 0; i < taskCount; i++) {
                builder.add(new UnblockTestTaskResponse(in));
            }
            tasks = Collections.unmodifiableList(builder);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeVInt(tasks.size());
            for (UnblockTestTaskResponse task : tasks) {
                task.writeTo(out);
            }
        }
    }
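
Note: UnblockTestTasksResponse serializes its task list as a length prefix (writeVInt) followed by the elements, and rebuilds an unmodifiable list on the way back in. The same shape with plain java.io, using a fixed-width int where Elasticsearch uses a variable-length vInt (names are illustrative):

import java.io.*;
import java.util.*;

class TaskListCodec {
    // Write the element count first, then each element.
    static void write(List<String> tasks, DataOutputStream out) throws IOException {
        out.writeInt(tasks.size());
        for (String task : tasks) {
            out.writeUTF(task);
        }
    }

    // Read the count, then exactly that many elements.
    static List<String> read(DataInputStream in) throws IOException {
        int count = in.readInt();
        List<String> tasks = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            tasks.add(in.readUTF());
        }
        return Collections.unmodifiableList(tasks);
    }
}
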
    /**
     * Test class for testing task operations
     */
    public static class TransportUnblockTestTasksAction extends TransportTasksAction<Task, UnblockTestTasksRequest,
            UnblockTestTasksResponse, UnblockTestTaskResponse> {

        @Inject
        public TransportUnblockTestTasksAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
                                               ClusterService clusterService, TransportService transportService) {
            super(settings, UnblockTestTasksAction.NAME, clusterName, threadPool, clusterService, transportService,
                new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
                UnblockTestTasksRequest::new, UnblockTestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
        }

        @Override
        protected UnblockTestTasksResponse newResponse(UnblockTestTasksRequest request, List<UnblockTestTaskResponse> tasks,
                                                       List<TaskOperationFailure> taskOperationFailures,
                                                       List<FailedNodeException> failedNodeExceptions) {
            return new UnblockTestTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
        }

        @Override
        protected UnblockTestTaskResponse readTaskResponse(StreamInput in) throws IOException {
            return new UnblockTestTaskResponse(in);
        }

        @Override
        protected UnblockTestTaskResponse taskOperation(UnblockTestTasksRequest request, Task task) {
            ((TestTask) task).unblock();
            return new UnblockTestTaskResponse();
        }

        @Override
        protected boolean accumulateExceptions() {
            return true;
        }
    }

    public static class UnblockTestTasksAction extends Action<UnblockTestTasksRequest, UnblockTestTasksResponse,
            UnblockTestTasksRequestBuilder> {

        public static final UnblockTestTasksAction INSTANCE = new UnblockTestTasksAction();
        public static final String NAME = "cluster:admin/tasks/testunblock";

        private UnblockTestTasksAction() {
            super(NAME);
        }

        @Override
        public UnblockTestTasksResponse newResponse() {
            return new UnblockTestTasksResponse();
        }

        @Override
        public UnblockTestTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
            return new UnblockTestTasksRequestBuilder(client, this);
        }
    }

    public static class UnblockTestTasksRequestBuilder extends ActionRequestBuilder<UnblockTestTasksRequest, UnblockTestTasksResponse,
            UnblockTestTasksRequestBuilder> {

        protected UnblockTestTasksRequestBuilder(ElasticsearchClient client, Action<UnblockTestTasksRequest, UnblockTestTasksResponse,
                UnblockTestTasksRequestBuilder> action) {
            super(client, action, new UnblockTestTasksRequest());
        }
    }

}

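Note: each action above pairs a private constructor with a static INSTANCE and a unique NAME string, so callers always go through the one registered instance. A generic sketch of that singleton-plus-name pattern (the ActionRegistry here is hypothetical, only meant to show the shape):

import java.util.HashMap;
import java.util.Map;

// Hypothetical registry keyed by the action's unique wire name.
class ActionRegistry {
    private final Map<String, Object> actions = new HashMap<>();

    void register(String name, Object action) {
        if (actions.putIfAbsent(name, action) != null) {
            throw new IllegalStateException("action [" + name + "] already registered");
        }
    }
}

final class PingAction {
    static final PingAction INSTANCE = new PingAction(); // the only instance
    static final String NAME = "cluster:admin/ping";     // stable wire name

    private PingAction() {} // no other instances possible
}
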
@ -18,23 +18,19 @@
 */
package org.elasticsearch.action.admin.cluster.node.tasks;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.action.support.tasks.TransportTasksAction;

@ -42,16 +38,11 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService;
import org.elasticsearch.test.tasks.MockTaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@ -70,102 +61,13 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static org.elasticsearch.action.support.PlainActionFuture.newFuture;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.not;

public class TransportTasksActionTests extends ESTestCase {

    private static ThreadPool threadPool;
    private static final ClusterName clusterName = new ClusterName("test-cluster");
    private TestNode[] testNodes;
    private int nodesCount;

    @BeforeClass
    public static void beforeClass() {
        threadPool = new ThreadPool(TransportTasksActionTests.class.getSimpleName());
    }

    @AfterClass
    public static void afterClass() {
        ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
        threadPool = null;
    }

    public void setupTestNodes(Settings settings) {
        nodesCount = randomIntBetween(2, 10);
        testNodes = new TestNode[nodesCount];
        for (int i = 0; i < testNodes.length; i++) {
            testNodes[i] = new TestNode("node" + i, threadPool, settings);
        }
    }

    @After
    public final void shutdownTestNodes() throws Exception {
        for (TestNode testNode : testNodes) {
            testNode.close();
        }
    }

    private static class TestNode implements Releasable {
        public TestNode(String name, ThreadPool threadPool, Settings settings) {
            transportService = new TransportService(settings,
                new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry()),
                threadPool, new NamedWriteableRegistry()) {
                @Override
                protected TaskManager createTaskManager() {
                    if (MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.get(settings)) {
                        return new MockTaskManager(settings);
                    } else {
                        return super.createTaskManager();
                    }
                }
            };
            transportService.start();
            clusterService = new TestClusterService(threadPool, transportService);
            discoveryNode = new DiscoveryNode(name, transportService.boundAddress().publishAddress(), Version.CURRENT);
            transportListTasksAction = new TransportListTasksAction(settings, clusterName, threadPool, clusterService, transportService,
                new ActionFilters(Collections.emptySet()), new IndexNameExpressionResolver(settings));
        }

        public final TestClusterService clusterService;
        public final TransportService transportService;
        public final DiscoveryNode discoveryNode;
        public final TransportListTasksAction transportListTasksAction;

        @Override
        public void close() {
            transportService.close();
        }
    }

    public static void connectNodes(TestNode... nodes) {
        DiscoveryNode[] discoveryNodes = new DiscoveryNode[nodes.length];
        for (int i = 0; i < nodes.length; i++) {
            discoveryNodes[i] = nodes[i].discoveryNode;
        }
        DiscoveryNode master = discoveryNodes[0];
        for (TestNode node : nodes) {
            node.clusterService.setState(ClusterStateCreationUtils.state(node.discoveryNode, master, discoveryNodes));
        }
        for (TestNode nodeA : nodes) {
            for (TestNode nodeB : nodes) {
                nodeA.transportService.connectToNode(nodeB.discoveryNode);
            }
        }
    }

    public static RecordingTaskManagerListener[] setupListeners(TestNode[] nodes, String... actionMasks) {
        RecordingTaskManagerListener[] listeners = new RecordingTaskManagerListener[nodes.length];
        for (int i = 0; i < nodes.length; i++) {
            listeners[i] = new RecordingTaskManagerListener(nodes[i].discoveryNode, actionMasks);
            ((MockTaskManager) (nodes[i].clusterService.getTaskManager())).addListener(listeners[i]);
        }
        return listeners;
    }
public class TransportTasksActionTests extends TaskManagerTestCase {

    public static class NodeRequest extends BaseNodeRequest {
        protected String requestName;
@ -197,13 +99,13 @@ public class TransportTasksActionTests extends ESTestCase {

        @Override
        public String getDescription() {
            return "NodeRequest[" + requestName + ", " + enableTaskManager + "]";
            return "CancellableNodeRequest[" + requestName + ", " + enableTaskManager + "]";
        }

        @Override
        public Task createTask(long id, String type, String action) {
        public Task createTask(long id, String type, String action, String parentTaskNode, long parentTaskId) {
            if (enableTaskManager) {
                return super.createTask(id, type, action);
                return super.createTask(id, type, action, parentTaskNode, parentTaskId);
            } else {
                return null;
            }

@ -214,7 +116,7 @@ public class TransportTasksActionTests extends ESTestCase {
        private String requestName;
        private boolean enableTaskManager;

        private NodesRequest() {
        NodesRequest() {
            super();
        }

@ -244,7 +146,7 @@ public class TransportTasksActionTests extends ESTestCase {

        @Override
        public String getDescription() {
            return "NodesRequest[" + requestName + ", " + enableTaskManager + "]";
            return "CancellableNodesRequest[" + requestName + ", " + enableTaskManager + "]";
        }

        @Override

@ -257,70 +159,14 @@ public class TransportTasksActionTests extends ESTestCase {
        }
    }

    static class NodeResponse extends BaseNodeResponse {

        protected NodeResponse() {
            super();
        }

        protected NodeResponse(DiscoveryNode node) {
            super(node);
        }
    }

    static class NodesResponse extends BaseNodesResponse<NodeResponse> {

        private int failureCount;

        protected NodesResponse(ClusterName clusterName, NodeResponse[] nodes, int failureCount) {
            super(clusterName, nodes);
            this.failureCount = failureCount;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            failureCount = in.readVInt();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeVInt(failureCount);
        }

        public int failureCount() {
            return failureCount;
        }
    }

    /**
     * Simulates node-based task that can be used to block node tasks so they are guaranteed to be registered by task manager
     */
    abstract class TestNodesAction extends TransportNodesAction<NodesRequest, NodesResponse, NodeRequest, NodeResponse> {
    abstract class TestNodesAction extends AbstractTestNodesAction<NodesRequest, NodeRequest> {

        TestNodesAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool,
                        ClusterService clusterService, TransportService transportService) {
            super(settings, actionName, clusterName, threadPool, clusterService, transportService,
                new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY),
                NodesRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC);
        }

        @Override
        protected NodesResponse newResponse(NodesRequest request, AtomicReferenceArray responses) {
            final List<NodeResponse> nodesList = new ArrayList<>();
            int failureCount = 0;
            for (int i = 0; i < responses.length(); i++) {
                Object resp = responses.get(i);
                if (resp instanceof NodeResponse) { // will also filter out null response for unallocated ones
                    nodesList.add((NodeResponse) resp);
                } else if (resp instanceof FailedNodeException) {
                    failureCount++;
                } else {
                    logger.warn("unknown response type [{}], expected NodeLocalGatewayMetaState or FailedNodeException", resp);
                }
            }
            return new NodesResponse(clusterName, nodesList.toArray(new NodeResponse[nodesList.size()]), failureCount);
            super(settings, actionName, clusterName, threadPool, clusterService, transportService, NodesRequest::new, NodeRequest::new);
        }

        @Override

@ -332,14 +178,6 @@ public class TransportTasksActionTests extends ESTestCase {
        protected NodeResponse newNodeResponse() {
            return new NodeResponse();
        }

        @Override
        protected abstract NodeResponse nodeOperation(NodeRequest request);

        @Override
        protected boolean accumulateExceptions() {
            return true;
        }
    }

    static class TestTaskResponse implements Writeable<TestTaskResponse> {

@ -411,7 +249,7 @@ public class TransportTasksActionTests extends ESTestCase {
    /**
     * Test class for testing task operations
     */
    static abstract class TestTasksAction extends TransportTasksAction<TestTasksRequest, TestTasksResponse, TestTaskResponse> {
    static abstract class TestTasksAction extends TransportTasksAction<Task, TestTasksRequest, TestTasksResponse, TestTaskResponse> {

        protected TestTasksAction(Settings settings, String actionName, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService,
                                  TransportService transportService) {

@ -548,7 +386,7 @@ public class TransportTasksActionTests extends ESTestCase {
        assertEquals(testNodes.length, response.getPerNodeTasks().size());
        for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : response.getPerNodeTasks().entrySet()) {
            assertEquals(1, entry.getValue().size());
            assertEquals("NodeRequest[Test Request, true]", entry.getValue().get(0).getDescription());
            assertEquals("CancellableNodeRequest[Test Request, true]", entry.getValue().get(0).getDescription());
        }

        // Make sure that the main task on coordinating node is the task that was returned to us by execute()

@ -648,7 +486,7 @@ public class TransportTasksActionTests extends ESTestCase {
        assertEquals(testNodes.length, response.getPerNodeTasks().size());
        for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : response.getPerNodeTasks().entrySet()) {
            assertEquals(1, entry.getValue().size());
            assertEquals("NodeRequest[Test Request, true]", entry.getValue().get(0).getDescription());
            assertEquals("CancellableNodeRequest[Test Request, true]", entry.getValue().get(0).getDescription());
        }

        // Release all tasks and wait for response

@ -657,6 +495,61 @@ public class TransportTasksActionTests extends ESTestCase {
        assertEquals(0, responses.failureCount());
    }

    public void testCancellingTasksThatDontSupportCancellation() throws Exception {
        setupTestNodes(Settings.EMPTY);
        connectNodes(testNodes);
        CountDownLatch checkLatch = new CountDownLatch(1);
        CountDownLatch responseLatch = new CountDownLatch(1);
        Task task = startBlockingTestNodesAction(checkLatch, new ActionListener<NodesResponse>() {
            @Override
            public void onResponse(NodesResponse nodeResponses) {
                responseLatch.countDown();
            }

            @Override
            public void onFailure(Throwable e) {
                responseLatch.countDown();
            }
        });
        String actionName = "testAction"; // only pick the main action

        // Try to cancel main task using action name
        CancelTasksRequest request = new CancelTasksRequest(testNodes[0].discoveryNode.getId());
        request.reason("Testing Cancellation");
        request.actions(actionName);
        CancelTasksResponse response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request)
            .get();

        // Shouldn't match any tasks since testAction doesn't support cancellation
        assertEquals(0, response.getTasks().size());
        assertEquals(0, response.getTaskFailures().size());
        assertEquals(0, response.getNodeFailures().size());


        // Try to cancel main task using id
        request = new CancelTasksRequest(testNodes[0].discoveryNode.getId());
        request.reason("Testing Cancellation");
        request.taskId(task.getId());
        response = testNodes[randomIntBetween(0, testNodes.length - 1)].transportCancelTasksAction.execute(request).get();

        // Shouldn't match any tasks since testAction doesn't support cancellation
        assertEquals(0, response.getTasks().size());
        assertEquals(0, response.getTaskFailures().size());
        assertEquals(1, response.getNodeFailures().size());
        assertThat(response.getNodeFailures().get(0).getDetailedMessage(), containsString("doesn't support cancellation"));

        // Make sure that task is still running
        ListTasksRequest listTasksRequest = new ListTasksRequest();
        listTasksRequest.actions(actionName);
        ListTasksResponse listResponse = testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction.execute(listTasksRequest).get();
        assertEquals(1, listResponse.getPerNodeTasks().size());

        // Release all tasks and wait for response
        checkLatch.countDown();
        responseLatch.await(10, TimeUnit.SECONDS);
    }

    public void testFailedTasksCount() throws ExecutionException, InterruptedException, IOException {
        Settings settings = Settings.builder().put(MockTaskManager.USE_MOCK_TASK_MANAGER_SETTING.getKey(), true).build();
        setupTestNodes(settings);

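Note: the new cancellation test coordinates the test thread and the blocked tasks with two CountDownLatches: checkLatch holds the tasks open while assertions run, responseLatch tells the test when the listener has fired. A minimal sketch of that handshake in plain Java (the worker body is illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

class LatchHandshake {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch checkLatch = new CountDownLatch(1);    // test -> worker: you may finish
        CountDownLatch responseLatch = new CountDownLatch(1); // worker -> test: I am done

        Thread worker = new Thread(() -> {
            try {
                checkLatch.await(); // stay "running" until the test has asserted
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
            responseLatch.countDown(); // signal completion either way
        });
        worker.start();

        // ... assertions against the still-running worker would go here ...

        checkLatch.countDown();                    // release the worker
        responseLatch.await(10, TimeUnit.SECONDS); // bounded wait for completion
    }
}
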
@ -21,6 +21,8 @@ package org.elasticsearch.aliases;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;

@ -54,6 +56,8 @@ import java.util.concurrent.TimeUnit;

import static org.elasticsearch.client.Requests.createIndexRequest;
import static org.elasticsearch.client.Requests.indexRequest;
import static org.elasticsearch.cluster.metadata.AliasAction.Type.ADD;
import static org.elasticsearch.cluster.metadata.AliasAction.Type.REMOVE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_METADATA_BLOCK;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_READ_ONLY_BLOCK;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_METADATA;

@ -588,7 +592,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
            .addAlias("foobar", "foo"));

        assertAcked(admin().indices().prepareAliases()
            .addAliasAction(new AliasAction(AliasAction.Type.ADD, "foobar", "bac").routing("bla")));
            .addAliasAction(new AliasAction(ADD, "foobar", "bac").routing("bla")));

        logger.info("--> getting bar and baz for index bazbar");
        getResponse = admin().indices().prepareGetAliases("bar", "bac").addIndices("bazbar").get();

@ -724,8 +728,8 @@ public class IndexAliasesIT extends ESIntegTestCase {
            assertAcked(admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, "alias1")));
            fail("create alias should have failed due to null index");
        } catch (IllegalArgumentException e) {
            assertThat("Exception text does not contain \"Alias action [add]: [index] may not be empty string\"",
                e.getMessage(), containsString("Alias action [add]: [index] may not be empty string"));
            assertThat("Exception text does not contain \"Alias action [add]: [index/indices] may not be empty string\"",
                e.getMessage(), containsString("Alias action [add]: [index/indices] may not be empty string"));
        }
    }

@ -740,8 +744,8 @@ public class IndexAliasesIT extends ESIntegTestCase {
            assertAcked(admin().indices().prepareAliases().addAlias((String) null, "empty-alias"));
            fail("create alias should have failed due to null index");
        } catch (IllegalArgumentException e) {
            assertThat("Exception text does not contain \"Alias action [add]: [index] may not be empty string\"",
                e.getMessage(), containsString("Alias action [add]: [index] may not be empty string"));
            assertThat("Exception text does not contain \"Alias action [add]: [index/indices] may not be empty string\"",
                e.getMessage(), containsString("Alias action [add]: [index/indices] may not be empty string"));
        }
    }

@ -750,7 +754,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
            admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("", "alias1")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[index] may not be empty string"));
            assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "", "alias1")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
        }
    }

@ -759,7 +769,19 @@ public class IndexAliasesIT extends ESIntegTestCase {
            admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", null)).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias] may not be empty string"));
            assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "index1", (String) null)).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "index1", (String[]) null)).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias/aliases] is either missing or null"));
        }
    }

@ -768,7 +790,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
            admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", "")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias] may not be empty string"));
            assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "index1", "")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
        }
    }

@ -780,6 +808,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, null, (String) null)).get();
            fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
        } catch (ActionRequestValidationException e) {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
    }

    public void testAddAliasEmptyAliasEmptyIndex() {

@ -790,6 +825,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(ADD, "", "")).get();
            fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
        } catch (ActionRequestValidationException e) {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
    }

    public void testRemoveAliasNullIndex() {

@ -797,7 +839,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
            admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction(null, "alias1")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[index] may not be empty string"));
            assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, null, "alias1")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
        }
    }

@ -806,7 +854,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
            admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("", "alias1")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[index] may not be empty string"));
            assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "", "alias1")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[index/indices] may not be empty string"));
        }
    }

@ -815,7 +869,19 @@ public class IndexAliasesIT extends ESIntegTestCase {
            admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", null)).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias] may not be empty string"));
            assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "index1", (String) null)).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "index1", (String[]) null)).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias/aliases] is either missing or null"));
        }
    }

@ -824,7 +890,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
            admin().indices().prepareAliases().addAliasAction(AliasAction.newRemoveAliasAction("index1", "")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias] may not be empty string"));
            assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "index1", "")).get();
            fail("Expected ActionRequestValidationException");
        } catch (ActionRequestValidationException e) {
            assertThat(e.getMessage(), containsString("[alias/aliases] may not be empty string"));
        }
    }

@ -836,6 +908,20 @@ public class IndexAliasesIT extends ESIntegTestCase {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, null, (String) null)).get();
            fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
        } catch (ActionRequestValidationException e) {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, (String[]) null, (String[]) null)).get();
            fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
        } catch (ActionRequestValidationException e) {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
    }

    public void testRemoveAliasEmptyAliasEmptyIndex() {

@ -846,6 +932,13 @@ public class IndexAliasesIT extends ESIntegTestCase {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
        try {
            admin().indices().prepareAliases().addAliasAction(new AliasActions(REMOVE, "", "")).get();
            fail("Should throw " + ActionRequestValidationException.class.getSimpleName());
        } catch (ActionRequestValidationException e) {
            assertThat(e.validationErrors(), notNullValue());
            assertThat(e.validationErrors().size(), equalTo(2));
        }
    }

    public void testGetAllAliasesWorks() {

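Note: the alias validation tests repeat the same try / fail / catch shape for every illegal input. A small helper in the style of JUnit's later expectThrows would collapse that boilerplate; this is a sketch of the idea, not a utility used by this change:

import java.util.function.Consumer;

class Expect {
    // Run the action, assert it throws T, and hand the exception to the inspector.
    static <T extends Throwable> void throwsException(Class<T> expected, Runnable action, Consumer<T> inspector) {
        try {
            action.run();
        } catch (Throwable t) {
            if (expected.isInstance(t)) {
                inspector.accept(expected.cast(t));
                return;
            }
            throw new AssertionError("expected " + expected.getSimpleName() + " but got " + t, t);
        }
        throw new AssertionError("expected " + expected.getSimpleName() + " but nothing was thrown");
    }
}

With a helper like this, each case collapses to a single call asserting on the exception message.
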
@ -396,7 +396,7 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
        assertThat(query, instanceOf(BooleanQuery.class));
        BooleanQuery boolQuery = (BooleanQuery) query;
        int expectedMinimumShouldMatch = numberOfTerms * percent / 100;
        if (simpleQueryStringBuilder.defaultOperator().equals(Operator.AND) && numberOfTerms > 1) {
        if (numberOfTerms == 1 || simpleQueryStringBuilder.defaultOperator().equals(Operator.AND)) {
            expectedMinimumShouldMatch = 0;
        }
        assertEquals(expectedMinimumShouldMatch, boolQuery.getMinimumNumberShouldMatch());

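Note: the minimum-should-match arithmetic tested above is plain integer math: with 5 optional terms and percent = 60, expectedMinimumShouldMatch is 5 * 60 / 100 = 3, and the new guard zeroes it out for single-term queries or an AND default operator. A tiny worked check:

class MinimumShouldMatchMath {
    public static void main(String[] args) {
        int numberOfTerms = 5;
        int percent = 60;
        int expected = numberOfTerms * percent / 100; // integer division -> 3
        boolean andOperator = false;
        if (numberOfTerms == 1 || andOperator) {
            expected = 0; // AND already requires every clause; one term has nothing optional
        }
        System.out.println(expected); // prints 3
    }
}
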
@ -459,6 +459,11 @@ public class FunctionScoreTests extends ESTestCase {
        protected boolean doEquals(ScoreFunction other) {
            return false;
        }

        @Override
        protected int doHashCode() {
            return 0;
        }
    }

    public void testSimpleWeightedFunction() throws IOException, ExecutionException, InterruptedException {

@ -615,21 +620,7 @@ public class FunctionScoreTests extends ESTestCase {
        Float minScore = randomBoolean() ? null : 1.0f;
        CombineFunction combineFunction = randomFrom(CombineFunction.values());
        float maxBoost = randomBoolean() ? Float.POSITIVE_INFINITY : randomFloat();
        ScoreFunction function = randomBoolean() ? null : new ScoreFunction(combineFunction) {
            @Override
            public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException {
                return null;
            }

            @Override
            public boolean needsScores() {
                return false;
            }
            @Override
            protected boolean doEquals(ScoreFunction other) {
                return other == this;
            }
        };
        ScoreFunction function = randomBoolean() ? null : new DummyScoreFunction(combineFunction);

        FunctionScoreQuery q = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, minScore, combineFunction, maxBoost);
        FunctionScoreQuery q1 = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, minScore, combineFunction, maxBoost);

@ -640,23 +631,7 @@ public class FunctionScoreTests extends ESTestCase {

        FunctionScoreQuery diffQuery = new FunctionScoreQuery(new TermQuery(new Term("foo", "baz")), function, minScore, combineFunction, maxBoost);
        FunctionScoreQuery diffMinScore = new FunctionScoreQuery(q.getSubQuery(), function, minScore == null ? 1.0f : null, combineFunction, maxBoost);
        ScoreFunction otherFunction = function == null ? new ScoreFunction(combineFunction) {
            @Override
            public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException {
                return null;
            }

            @Override
            public boolean needsScores() {
                return false;
            }

            @Override
            protected boolean doEquals(ScoreFunction other) {
                return other == this;
            }

        } : null;
        ScoreFunction otherFunction = function == null ? new DummyScoreFunction(combineFunction) : null;
        FunctionScoreQuery diffFunction = new FunctionScoreQuery(q.getSubQuery(), otherFunction, minScore, combineFunction, maxBoost);
        FunctionScoreQuery diffMaxBoost = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, minScore, combineFunction, maxBoost == 1.0f ? 0.9f : 1.0f);
        q1.setBoost(3.0f);

@ -685,22 +660,7 @@ public class FunctionScoreTests extends ESTestCase {
    public void testFilterFunctionScoreHashCodeAndEquals() {
        ScoreMode mode = randomFrom(ScoreMode.values());
        CombineFunction combineFunction = randomFrom(CombineFunction.values());
        ScoreFunction scoreFunction = new ScoreFunction(combineFunction) {
            @Override
            public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException {
                return null;
            }

            @Override
            public boolean needsScores() {
                return false;
            }

            @Override
            protected boolean doEquals(ScoreFunction other) {
                return other == this;
            }
        };
        ScoreFunction scoreFunction = new DummyScoreFunction(combineFunction);
        Float minScore = randomBoolean() ? null : 1.0f;
        Float maxBoost = randomBoolean() ? Float.POSITIVE_INFINITY : randomFloat();


@ -742,4 +702,30 @@ public class FunctionScoreTests extends ESTestCase {
            }
        }
    }

    private static class DummyScoreFunction extends ScoreFunction {
        protected DummyScoreFunction(CombineFunction scoreCombiner) {
            super(scoreCombiner);
        }

        @Override
        public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException {
            return null;
        }

        @Override
        public boolean needsScores() {
            return false;
        }

        @Override
        protected boolean doEquals(ScoreFunction other) {
            return other == this;
        }

        @Override
        protected int doHashCode() {
            return 0;
        }
    }
}

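Note: the refactor above replaces three identical anonymous ScoreFunction subclasses with the single named DummyScoreFunction, which also gains the doHashCode override the anonymous versions lacked. The general pattern, sketched against a hypothetical interface:

// Instead of repeating an anonymous stub at every call site ...
interface Scorer {
    double score(double raw);
}

class ScorerTests {
    // ... a single named test double keeps behavior and equals/hashCode in one place.
    static final class DummyScorer implements Scorer {
        @Override
        public double score(double raw) {
            return 0; // neutral stub behavior
        }

        @Override
        public boolean equals(Object other) {
            return other == this; // identity equality, mirroring the test double above
        }

        @Override
        public int hashCode() {
            return 0; // constant hash is consistent with identity equals
        }
    }
}
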

@ -228,7 +228,7 @@ public class SimilarityTests extends ESSingleNodeTestCase {
            indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
            fail("Expected MappingParsingException");
        } catch (MapperParsingException e) {
            assertThat(e.getMessage(), equalTo("Unknown Similarity type [unknown_similarity] for [field1]"));
            assertThat(e.getMessage(), equalTo("Unknown Similarity type [unknown_similarity] for field [field1]"));
        }
    }

@ -255,7 +255,7 @@ public class SimilarityTests extends ESSingleNodeTestCase {
            parser.parse("type", new CompressedXContent(mapping));
            fail("Expected MappingParsingException");
        } catch (MapperParsingException e) {
            assertThat(e.getMessage(), equalTo("Unknown Similarity type [default] for [field1]"));
            assertThat(e.getMessage(), equalTo("Unknown Similarity type [default] for field [field1]"));
        }
    }
}

@ -35,6 +35,7 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.hamcrest.Matchers;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

@ -337,4 +338,20 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
            }
        }
    }

    public void testUpdateMappingOnAllTypes() throws IOException {
        assertAcked(prepareCreate("index").addMapping("type1", "f", "type=string").addMapping("type2", "f", "type=string"));

        assertAcked(client().admin().indices().preparePutMapping("index")
            .setType("type1")
            .setUpdateAllTypes(true)
            .setSource("f", "type=string,analyzer=default,null_value=n/a")
            .get());

        GetMappingsResponse mappings = client().admin().indices().prepareGetMappings("index").setTypes("type2").get();
        MappingMetaData type2Mapping = mappings.getMappings().get("index").get("type2").get();
        Map<String, Object> properties = (Map<String, Object>) type2Mapping.sourceAsMap().get("properties");
        Map<String, Object> f = (Map<String, Object>) properties.get("f");
        assertEquals("n/a", f.get("null_value"));
    }
}

@ -245,8 +245,8 @@ public class SearchSourceBuilderTests extends ESTestCase {
                builder.sort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values())));
                break;
            case 1:
                builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20))
                    .geohashes(AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values())));
                builder.sort(SortBuilders.geoDistanceSort(randomAsciiOfLengthBetween(5, 20),
                    AbstractQueryTestCase.randomGeohash(1, 12)).order(randomFrom(SortOrder.values())));
                break;
            case 2:
                builder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values())));

@ -0,0 +1,162 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.sort;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;

import java.io.IOException;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;

public abstract class AbstractSortTestCase<T extends NamedWriteable<T> & ToXContent & SortElementParserTemp<T>> extends ESTestCase {

    protected static NamedWriteableRegistry namedWriteableRegistry;

    private static final int NUMBER_OF_TESTBUILDERS = 20;
    static IndicesQueriesRegistry indicesQueriesRegistry;

    @BeforeClass
    public static void init() {
        namedWriteableRegistry = new NamedWriteableRegistry();
        namedWriteableRegistry.registerPrototype(GeoDistanceSortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE);
        indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry();
    }

    @AfterClass
    public static void afterClass() throws Exception {
        namedWriteableRegistry = null;
    }

    /** Returns random sort that is put under test */
    protected abstract T createTestItem();

    /** Returns mutated version of original so the returned sort is different in terms of equals/hashcode */
    protected abstract T mutate(T original) throws IOException;

    /**
     * Test that creates new sort from a random test sort and checks both for equality
     */
    public void testFromXContent() throws IOException {
        for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
            T testItem = createTestItem();

            XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
            if (randomBoolean()) {
                builder.prettyPrint();
            }
            builder.startObject();
            testItem.toXContent(builder, ToXContent.EMPTY_PARAMS);
            builder.endObject();

            XContentParser itemParser = XContentHelper.createParser(builder.bytes());
            itemParser.nextToken();

            /*
             * filter out name of sort, or field name to sort on for element fieldSort
             */
            itemParser.nextToken();
            String elementName = itemParser.currentName();
            itemParser.nextToken();

            QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
            context.reset(itemParser);
            NamedWriteable<T> parsedItem = testItem.fromXContent(context, elementName);
            assertNotSame(testItem, parsedItem);
            assertEquals(testItem, parsedItem);
            assertEquals(testItem.hashCode(), parsedItem.hashCode());
        }
    }

    /**
     * Test serialization and deserialization of the test sort.
     */
    public void testSerialization() throws IOException {
        for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
            T testsort = createTestItem();
            T deserializedsort = copyItem(testsort);
            assertEquals(testsort, deserializedsort);
            assertEquals(testsort.hashCode(), deserializedsort.hashCode());
            assertNotSame(testsort, deserializedsort);
        }
    }

    /**
     * Test equality and hashCode properties
     */
    public void testEqualsAndHashcode() throws IOException {
        for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
            T firstsort = createTestItem();
            assertFalse("sort is equal to null", firstsort.equals(null));
            assertFalse("sort is equal to incompatible type", firstsort.equals(""));
            assertTrue("sort is not equal to self", firstsort.equals(firstsort));
            assertThat("same sort's hashcode returns different values if called multiple times", firstsort.hashCode(),
                equalTo(firstsort.hashCode()));
            assertThat("different sorts should not be equal", mutate(firstsort), not(equalTo(firstsort)));
            assertThat("different sorts should have different hashcode", mutate(firstsort).hashCode(), not(equalTo(firstsort.hashCode())));

            T secondsort = copyItem(firstsort);
            assertTrue("sort is not equal to self", secondsort.equals(secondsort));
            assertTrue("sort is not equal to its copy", firstsort.equals(secondsort));
            assertTrue("equals is not symmetric", secondsort.equals(firstsort));
            assertThat("sort copy's hashcode is different from original hashcode", secondsort.hashCode(), equalTo(firstsort.hashCode()));

            T thirdsort = copyItem(secondsort);
            assertTrue("sort is not equal to self", thirdsort.equals(thirdsort));
            assertTrue("sort is not equal to its copy", secondsort.equals(thirdsort));
            assertThat("sort copy's hashcode is different from original hashcode", secondsort.hashCode(), equalTo(thirdsort.hashCode()));
            assertTrue("equals is not transitive", firstsort.equals(thirdsort));
            assertThat("sort copy's hashcode is different from original hashcode", firstsort.hashCode(), equalTo(thirdsort.hashCode()));
            assertTrue("equals is not symmetric", thirdsort.equals(secondsort));
            assertTrue("equals is not symmetric", thirdsort.equals(firstsort));
        }
    }

    protected T copyItem(T original) throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            original.writeTo(output);
            try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
                @SuppressWarnings("unchecked")
                T prototype = (T) namedWriteableRegistry.getPrototype(getPrototype(), original.getWriteableName());
                T copy = (T) prototype.readFrom(in);
                return copy;
            }
        }
    }

    protected abstract Class<T> getPrototype();
}

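Note: a concrete sort builder test is then just the three hooks. A hypothetical subclass might look like the following (FieldSortBuilder is used only to show the shape; whether it satisfies this base class's generic bounds at this revision is an assumption, and the Elasticsearch imports are omitted):

import java.io.IOException;

// Sketch of a concrete subclass wiring up the three abstract hooks.
public class FieldSortBuilderTests extends AbstractSortTestCase<FieldSortBuilder> {

    @Override
    protected FieldSortBuilder createTestItem() {
        // random field name and order, so every run exercises a fresh instance
        return new FieldSortBuilder(randomAsciiOfLengthBetween(5, 20))
                .order(randomFrom(SortOrder.values()));
    }

    @Override
    protected FieldSortBuilder mutate(FieldSortBuilder original) throws IOException {
        // flip the order so equals/hashCode must observe a difference
        FieldSortBuilder mutated = copyItem(original);
        mutated.order(original.order() == SortOrder.ASC ? SortOrder.DESC : SortOrder.ASC);
        return mutated;
    }

    @Override
    protected Class<FieldSortBuilder> getPrototype() {
        return FieldSortBuilder.class;
    }
}
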
@ -33,7 +33,6 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.query.GeoDistanceQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;

@ -53,7 +52,6 @@ import static org.elasticsearch.index.query.QueryBuilders.geoDistanceRangeQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;

@ -62,7 +60,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;



@ -201,14 +198,14 @@ public class GeoDistanceIT extends ESIntegTestCase {
        // SORTING

        searchResponse = client().prepareSearch().setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("location").point(40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
            .addSort(SortBuilders.geoDistanceSort("location", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
            .actionGet();

        assertHitCount(searchResponse, 7);
        assertOrderedSearchHits(searchResponse, "1", "3", "4", "5", "6", "2", "7");

        searchResponse = client().prepareSearch().setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("location").point(40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
            .addSort(SortBuilders.geoDistanceSort("location", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
            .actionGet();

        assertHitCount(searchResponse, 7);

@ -262,7 +259,7 @@ public class GeoDistanceIT extends ESIntegTestCase {

        // Order: Asc
        SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
            .actionGet();

        assertHitCount(searchResponse, 5);

@ -275,7 +272,7 @@ public class GeoDistanceIT extends ESIntegTestCase {

        // Order: Asc, Mode: max
        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max"))
            .execute().actionGet();

        assertHitCount(searchResponse, 5);

@ -288,7 +285,7 @@ public class GeoDistanceIT extends ESIntegTestCase {

        // Order: Desc
        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
            .actionGet();

        assertHitCount(searchResponse, 5);

@ -301,7 +298,7 @@ public class GeoDistanceIT extends ESIntegTestCase {

        // Order: Desc, Mode: min
        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min"))
            .execute().actionGet();

        assertHitCount(searchResponse, 5);

@ -313,7 +310,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));

        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC))
            .execute().actionGet();

        assertHitCount(searchResponse, 5);

@ -325,7 +322,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(5301d, 10d));

        searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
            .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC))
            .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC))
            .execute().actionGet();

        assertHitCount(searchResponse, 5);

@ -336,10 +333,13 @@ public class GeoDistanceIT extends ESIntegTestCase {
        assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(421.2d, 10d));
        assertThat(((Number) searchResponse.getHits().getAt(4).sortValues()[0]).doubleValue(), closeTo(0d, 10d));

        assertFailures(
        try {
            client().prepareSearch("test").setQuery(matchAllQuery())
                .addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).sortMode("sum")),
                RestStatus.BAD_REQUEST, containsString("sort_mode [sum] isn't supported for sorting by geo distance"));
                .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).sortMode("sum"));
            fail("sum should not be supported for sorting by geo distance");
        } catch (IllegalArgumentException e) {
            // expected
        }
    }
// Regression bug:
|
||||
|
@ -371,7 +371,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
|
||||
// Order: Asc
|
||||
SearchResponse searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
|
||||
.addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
|
||||
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.ASC)).execute()
|
||||
.actionGet();
|
||||
|
||||
assertHitCount(searchResponse, 2);
|
||||
|
@ -381,7 +381,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
|
||||
// Order: Desc
|
||||
searchResponse = client().prepareSearch("test").setQuery(matchAllQuery())
|
||||
.addSort(SortBuilders.geoDistanceSort("locations").point(40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
|
||||
.addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute()
|
||||
.actionGet();
|
||||
|
||||
// Doc with missing geo point is first, is consistent with 0.20.x
|
||||
|
@ -444,7 +444,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
|
||||
// Order: Asc
|
||||
SearchResponse searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders
|
||||
.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.ASC).setNestedPath("branches"))
|
||||
.geoDistanceSort("branches.location", 40.7143528, -74.0059731).order(SortOrder.ASC).setNestedPath("branches"))
|
||||
.execute().actionGet();
|
||||
|
||||
assertHitCount(searchResponse, 4);
|
||||
|
@ -456,8 +456,8 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
|
||||
// Order: Asc, Mode: max
|
||||
searchResponse = client()
|
||||
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location")
|
||||
.point(40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max").setNestedPath("branches"))
|
||||
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
|
||||
40.7143528, -74.0059731).order(SortOrder.ASC).sortMode("max").setNestedPath("branches"))
|
||||
.execute().actionGet();
|
||||
|
||||
assertHitCount(searchResponse, 4);
|
||||
|
@ -469,7 +469,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
|
||||
// Order: Desc
|
||||
searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders
|
||||
.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).order(SortOrder.DESC).setNestedPath("branches"))
|
||||
.geoDistanceSort("branches.location", 40.7143528, -74.0059731).order(SortOrder.DESC).setNestedPath("branches"))
|
||||
.execute().actionGet();
|
||||
|
||||
assertHitCount(searchResponse, 4);
|
||||
|
@ -481,8 +481,8 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
|
||||
// Order: Desc, Mode: min
|
||||
searchResponse = client()
|
||||
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location")
|
||||
.point(40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min").setNestedPath("branches"))
|
||||
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
|
||||
40.7143528, -74.0059731).order(SortOrder.DESC).sortMode("min").setNestedPath("branches"))
|
||||
.execute().actionGet();
|
||||
|
||||
assertHitCount(searchResponse, 4);
|
||||
|
@ -493,8 +493,8 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
|
||||
|
||||
searchResponse = client()
|
||||
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location")
|
||||
.point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC).setNestedPath("branches"))
|
||||
.prepareSearch("companies").setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location",
|
||||
40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC).setNestedPath("branches"))
|
||||
.execute().actionGet();
|
||||
|
||||
assertHitCount(searchResponse, 4);
|
||||
|
@ -505,8 +505,8 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(5301.0d, 10d));
|
||||
|
||||
searchResponse = client().prepareSearch("companies")
|
||||
.setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location").setNestedPath("branches")
|
||||
.point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.DESC).setNestedPath("branches"))
|
||||
.setQuery(matchAllQuery()).addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731)
|
||||
.setNestedPath("branches").sortMode("avg").order(SortOrder.DESC).setNestedPath("branches"))
|
||||
.execute().actionGet();
|
||||
|
||||
assertHitCount(searchResponse, 4);
|
||||
|
@ -517,8 +517,9 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), closeTo(0d, 10d));
|
||||
|
||||
searchResponse = client().prepareSearch("companies").setQuery(matchAllQuery())
|
||||
.addSort(SortBuilders.geoDistanceSort("branches.location").setNestedFilter(termQuery("branches.name", "brooklyn"))
|
||||
.point(40.7143528, -74.0059731).sortMode("avg").order(SortOrder.ASC).setNestedPath("branches"))
|
||||
.addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731)
|
||||
.setNestedFilter(termQuery("branches.name", "brooklyn"))
|
||||
.sortMode("avg").order(SortOrder.ASC).setNestedPath("branches"))
|
||||
.execute().actionGet();
|
||||
assertHitCount(searchResponse, 4);
|
||||
assertFirstHit(searchResponse, hasId("4"));
|
||||
|
@ -528,11 +529,14 @@ public class GeoDistanceIT extends ESIntegTestCase {
|
|||
assertThat(((Number) searchResponse.getHits().getAt(2).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
|
||||
assertThat(((Number) searchResponse.getHits().getAt(3).sortValues()[0]).doubleValue(), equalTo(Double.MAX_VALUE));
|
||||
|
||||
assertFailures(
|
||||
try {
|
||||
client().prepareSearch("companies").setQuery(matchAllQuery())
|
||||
.addSort(SortBuilders.geoDistanceSort("branches.location").point(40.7143528, -74.0059731).sortMode("sum")
|
||||
.setNestedPath("branches")),
|
||||
RestStatus.BAD_REQUEST, containsString("sort_mode [sum] isn't supported for sorting by geo distance"));
|
||||
.addSort(SortBuilders.geoDistanceSort("branches.location", 40.7143528, -74.0059731).sortMode("sum")
|
||||
.setNestedPath("branches"));
|
||||
fail("Sum should not be allowed as sort mode");
|
||||
} catch (IllegalArgumentException e) {
|
||||
//expected
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -95,7 +95,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {

         SearchResponse searchResponse = client().prepareSearch()
                 .setQuery(matchAllQuery())
-                .addSort(new GeoDistanceSortBuilder("location").points(q).sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+                .addSort(new GeoDistanceSortBuilder("location", q).sortMode("min").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
                 .execute().actionGet();
         assertOrderedSearchHits(searchResponse, "d1", "d2");
         assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 3, 2, DistanceUnit.KILOMETERS), 0.01d));
@@ -103,7 +103,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {

         searchResponse = client().prepareSearch()
                 .setQuery(matchAllQuery())
-                .addSort(new GeoDistanceSortBuilder("location").points(q).sortMode("min").order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+                .addSort(new GeoDistanceSortBuilder("location", q).sortMode("min").order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
                 .execute().actionGet();
         assertOrderedSearchHits(searchResponse, "d2", "d1");
         assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 1, 5, 1, DistanceUnit.KILOMETERS), 0.01d));
@@ -111,7 +111,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {

         searchResponse = client().prepareSearch()
                 .setQuery(matchAllQuery())
-                .addSort(new GeoDistanceSortBuilder("location").points(q).sortMode("max").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+                .addSort(new GeoDistanceSortBuilder("location", q).sortMode("max").order(SortOrder.ASC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
                 .execute().actionGet();
         assertOrderedSearchHits(searchResponse, "d1", "d2");
         assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 2, 4, 1, DistanceUnit.KILOMETERS), 0.01d));
@@ -119,7 +119,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {

         searchResponse = client().prepareSearch()
                 .setQuery(matchAllQuery())
-                .addSort(new GeoDistanceSortBuilder("location").points(q).sortMode("max").order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
+                .addSort(new GeoDistanceSortBuilder("location", q).sortMode("max").order(SortOrder.DESC).geoDistance(GeoDistance.PLANE).unit(DistanceUnit.KILOMETERS))
                 .execute().actionGet();
         assertOrderedSearchHits(searchResponse, "d2", "d1");
         assertThat((Double)searchResponse.getHits().getAt(0).getSortValues()[0], closeTo(GeoDistance.PLANE.calculate(2, 1, 6, 2, DistanceUnit.KILOMETERS), 0.01d));
@@ -139,6 +139,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
         builder.endObject();
     }

+    @SuppressWarnings("deprecation")
     public void testManyToManyGeoPointsWithDifferentFormats() throws ExecutionException, InterruptedException, IOException {
         /** q  d1  d2
          * |4 o|  x |  x
@@ -171,13 +172,21 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
         List<GeoPoint> qPoints = new ArrayList<>();
         createQPoints(qHashes, qPoints);

-        GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("location");
+        GeoDistanceSortBuilder geoDistanceSortBuilder = null;
         for (int i = 0; i < 4; i++) {
             int at = randomInt(3 - i);
             if (randomBoolean()) {
-                geoDistanceSortBuilder.geohashes(qHashes.get(at));
+                if (geoDistanceSortBuilder == null) {
+                    geoDistanceSortBuilder = new GeoDistanceSortBuilder("location", qHashes.get(at));
+                } else {
+                    geoDistanceSortBuilder.geohashes(qHashes.get(at));
+                }
             } else {
-                geoDistanceSortBuilder.points(qPoints.get(at));
+                if (geoDistanceSortBuilder == null) {
+                    geoDistanceSortBuilder = new GeoDistanceSortBuilder("location", qPoints.get(at));
+                } else {
+                    geoDistanceSortBuilder.points(qPoints.get(at));
+                }
             }
             qHashes.remove(at);
             qPoints.remove(at);
@@ -210,8 +219,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {

         String hashPoint = "s037ms06g7h0";

-        GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("location");
-        geoDistanceSortBuilder.geohashes(hashPoint);
+        GeoDistanceSortBuilder geoDistanceSortBuilder = new GeoDistanceSortBuilder("location", hashPoint);

         SearchResponse searchResponse = client().prepareSearch()
                 .setQuery(matchAllQuery())
@@ -219,8 +227,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
                 .execute().actionGet();
         checkCorrectSortOrderForGeoSort(searchResponse);

-        geoDistanceSortBuilder = new GeoDistanceSortBuilder("location");
-        geoDistanceSortBuilder.points(new GeoPoint(2, 2));
+        geoDistanceSortBuilder = new GeoDistanceSortBuilder("location", new GeoPoint(2, 2));

         searchResponse = client().prepareSearch()
                 .setQuery(matchAllQuery())
@@ -228,8 +235,7 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
                 .execute().actionGet();
         checkCorrectSortOrderForGeoSort(searchResponse);

-        geoDistanceSortBuilder = new GeoDistanceSortBuilder("location");
-        geoDistanceSortBuilder.point(2, 2);
+        geoDistanceSortBuilder = new GeoDistanceSortBuilder("location", 2, 2);

         searchResponse = client().prepareSearch()
                 .setQuery(matchAllQuery())
@@ -240,21 +246,21 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase {
         searchResponse = client()
                 .prepareSearch()
                 .setSource(
-                        new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort("location").point(2.0, 2.0)
+                        new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort("location", 2.0, 2.0)
                                 .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE))).execute().actionGet();
         checkCorrectSortOrderForGeoSort(searchResponse);

         searchResponse = client()
                 .prepareSearch()
                 .setSource(
-                        new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort("location").geohashes("s037ms06g7h0")
+                        new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort("location", "s037ms06g7h0")
                                 .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE))).execute().actionGet();
         checkCorrectSortOrderForGeoSort(searchResponse);

         searchResponse = client()
                 .prepareSearch()
                 .setSource(
-                        new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort("location").point(2.0, 2.0)
+                        new SearchSourceBuilder().sort(SortBuilders.geoDistanceSort("location", 2.0, 2.0)
                                 .unit(DistanceUnit.KILOMETERS).geoDistance(GeoDistance.PLANE))).execute().actionGet();
         checkCorrectSortOrderForGeoSort(searchResponse);
     }
@@ -0,0 +1,252 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.sort;

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.geo.GeoDistance;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.unit.DistanceUnit;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.geo.RandomGeoGenerator;

import java.io.IOException;
import java.util.Arrays;

public class GeoDistanceSortBuilderTests extends AbstractSortTestCase<GeoDistanceSortBuilder> {

    @Override
    protected GeoDistanceSortBuilder createTestItem() {
        String fieldName = randomAsciiOfLengthBetween(1, 10);
        GeoDistanceSortBuilder result = null;

        int id = randomIntBetween(0, 2);
        switch(id) {
            case 0:
                int count = randomIntBetween(1, 10);
                String[] geohashes = new String[count];
                for (int i = 0; i < count; i++) {
                    geohashes[i] = RandomGeoGenerator.randomPoint(getRandom()).geohash();
                }

                result = new GeoDistanceSortBuilder(fieldName, geohashes);
                break;
            case 1:
                GeoPoint pt = RandomGeoGenerator.randomPoint(getRandom());
                result = new GeoDistanceSortBuilder(fieldName, pt.getLat(), pt.getLon());
                break;
            case 2:
                result = new GeoDistanceSortBuilder(fieldName, points(new GeoPoint[0]));
                break;
            default:
                throw new IllegalStateException("one of three geo initialisation strategies must be used");

        }
        if (randomBoolean()) {
            result.geoDistance(geoDistance(result.geoDistance()));
        }
        if (randomBoolean()) {
            result.unit(unit(result.unit()));
        }
        if (randomBoolean()) {
            result.order(RandomSortDataGenerator.order(result.order()));
        }
        if (randomBoolean()) {
            result.sortMode(mode(result.sortMode()));
        }
        if (randomBoolean()) {
            result.setNestedFilter(RandomSortDataGenerator.nestedFilter(result.getNestedFilter()));
        }
        if (randomBoolean()) {
            result.setNestedPath(RandomSortDataGenerator.randomAscii(result.getNestedPath()));
        }
        if (randomBoolean()) {
            result.coerce(! result.coerce());
        }
        if (randomBoolean()) {
            result.ignoreMalformed(! result.ignoreMalformed());
        }

        return result;
    }

    private static String mode(String original) {
        String[] modes = {"MIN", "MAX", "AVG"};
        String mode = ESTestCase.randomFrom(modes);
        while (mode.equals(original)) {
            mode = ESTestCase.randomFrom(modes);
        }
        return mode;
    }

    private DistanceUnit unit(DistanceUnit original) {
        int id = -1;
        while (id == -1 || (original != null && original.ordinal() == id)) {
            id = randomIntBetween(0, DistanceUnit.values().length - 1);
        }
        return DistanceUnit.values()[id];
    }

    private GeoPoint[] points(GeoPoint[] original) {
        GeoPoint[] result = null;
        while (result == null || Arrays.deepEquals(original, result)) {
            int count = randomIntBetween(1, 10);
            result = new GeoPoint[count];
            for (int i = 0; i < count; i++) {
                result[i] = RandomGeoGenerator.randomPoint(getRandom());
            }
        }
        return result;
    }

    private GeoDistance geoDistance(GeoDistance original) {
        int id = -1;
        while (id == -1 || (original != null && original.ordinal() == id)) {
            id = randomIntBetween(0, GeoDistance.values().length - 1);
        }
        return GeoDistance.values()[id];
    }

    @Override
    protected GeoDistanceSortBuilder mutate(GeoDistanceSortBuilder original) throws IOException {
        GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(original);
        int parameter = randomIntBetween(0, 9);
        switch (parameter) {
            case 0:
                while (Arrays.deepEquals(original.points(), result.points())) {
                    GeoPoint pt = RandomGeoGenerator.randomPoint(getRandom());
                    result.point(pt.getLat(), pt.getLon());
                }
                break;
            case 1:
                result.points(points(original.points()));
                break;
            case 2:
                result.geoDistance(geoDistance(original.geoDistance()));
                break;
            case 3:
                result.unit(unit(original.unit()));
                break;
            case 4:
                result.order(RandomSortDataGenerator.order(original.order()));
                break;
            case 5:
                result.sortMode(mode(original.sortMode()));
                break;
            case 6:
                result.setNestedFilter(RandomSortDataGenerator.nestedFilter(original.getNestedFilter()));
                break;
            case 7:
                result.setNestedPath(RandomSortDataGenerator.randomAscii(original.getNestedPath()));
                break;
            case 8:
                result.coerce(! original.coerce());
                break;
            case 9:
                // ignore malformed will only be set if coerce is set to true
                result.coerce(false);
                result.ignoreMalformed(! original.ignoreMalformed());
                break;
        }
        return result;

    }

    @SuppressWarnings("unchecked")
    @Override
    protected Class<GeoDistanceSortBuilder> getPrototype() {
        return (Class<GeoDistanceSortBuilder>) GeoDistanceSortBuilder.PROTOTYPE.getClass();
    }

    public void testSortModeSumIsRejectedInSetter() {
        GeoDistanceSortBuilder builder = new GeoDistanceSortBuilder("testname", -1, -1);
        GeoPoint point = RandomGeoGenerator.randomPoint(getRandom());
        builder.point(point.getLat(), point.getLon());
        try {
            builder.sortMode("SUM");
            fail("sort mode sum should not be supported");
        } catch (IllegalArgumentException e) {
            // all good
        }
    }

    public void testSortModeSumIsRejectedInJSON() throws IOException {
        String json = "{\n" +
                "  \"testname\" : [ {\n" +
                "    \"lat\" : -6.046997540714173,\n" +
                "    \"lon\" : -51.94128329747579\n" +
                "  } ],\n" +
                "  \"unit\" : \"m\",\n" +
                "  \"distance_type\" : \"sloppy_arc\",\n" +
                "  \"reverse\" : true,\n" +
                "  \"mode\" : \"SUM\",\n" +
                "  \"coerce\" : false,\n" +
                "  \"ignore_malformed\" : false\n" +
                "}";
        XContentParser itemParser = XContentHelper.createParser(new BytesArray(json));
        itemParser.nextToken();

        QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
        context.reset(itemParser);

        try {
            GeoDistanceSortBuilder.PROTOTYPE.fromXContent(context, "");
            fail("sort mode sum should not be supported");
        } catch (IllegalArgumentException e) {
            // all good
        }
    }

    public void testGeoDistanceSortCanBeParsedFromGeoHash() throws IOException {
        String json = "{\n" +
                "  \"VDcvDuFjE\" : [ \"7umzzv8eychg\", \"dmdgmt5z13uw\", " +
                "  \"ezu09wxw6v4c\", \"kc7s3515p6k6\", \"jgeuvjwrmfzn\", \"kcpcfj7ruyf8\" ],\n" +
                "  \"unit\" : \"m\",\n" +
                "  \"distance_type\" : \"sloppy_arc\",\n" +
                "  \"reverse\" : true,\n" +
                "  \"mode\" : \"MAX\",\n" +
                "  \"nested_filter\" : {\n" +
                "    \"ids\" : {\n" +
                "      \"type\" : [ ],\n" +
                "      \"values\" : [ ],\n" +
                "      \"boost\" : 5.711116\n" +
                "    }\n" +
                "  },\n" +
                "  \"coerce\" : false,\n" +
                "  \"ignore_malformed\" : true\n" +
                "}";
        XContentParser itemParser = XContentHelper.createParser(new BytesArray(json));
        itemParser.nextToken();

        QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
        context.reset(itemParser);

        GeoDistanceSortBuilder result = GeoDistanceSortBuilder.PROTOTYPE.fromXContent(context, json);
        assertEquals("[-19.700583312660456, -2.8225036337971687, "
                + "31.537466906011105, -74.63590376079082, "
                + "43.71844606474042, -5.548660643398762, "
                + "-37.20467280596495, 38.71751043945551, "
                + "-69.44606635719538, 84.25200328230858, "
                + "-39.03717711567879, 44.74099852144718]", Arrays.toString(result.points()));
    }
}
@@ -0,0 +1,113 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.sort;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.query.IdsQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.test.ESTestCase;

public class RandomSortDataGenerator {
    private RandomSortDataGenerator() {
        // this is a helper class only, doesn't need a constructor
    }

    public static QueryBuilder nestedFilter(QueryBuilder original) {
        @SuppressWarnings("rawtypes")
        QueryBuilder nested = null;
        while (nested == null || nested.equals(original)) {
            switch (ESTestCase.randomInt(2)) {
                case 0:
                    nested = new MatchAllQueryBuilder();
                    break;
                case 1:
                    nested = new IdsQueryBuilder();
                    break;
                default:
                case 2:
                    nested = new TermQueryBuilder(ESTestCase.randomAsciiOfLengthBetween(1, 10), ESTestCase.randomAsciiOfLengthBetween(1, 10));
                    break;
            }
            nested.boost((float) ESTestCase.randomDoubleBetween(0, 10, false));
        }
        return nested;
    }

    public static String randomAscii(String original) {
        String nestedPath = ESTestCase.randomAsciiOfLengthBetween(1, 10);
        while (nestedPath.equals(original)) {
            nestedPath = ESTestCase.randomAsciiOfLengthBetween(1, 10);
        }
        return nestedPath;
    }

    public static String mode(String original) {
        String[] modes = {"min", "max", "avg", "sum"};
        String mode = ESTestCase.randomFrom(modes);
        while (mode.equals(original)) {
            mode = ESTestCase.randomFrom(modes);
        }
        return mode;
    }

    public static Object missing(Object original) {
        Object missing = null;
        Object otherMissing = null;
        if (original instanceof BytesRef) {
            otherMissing = ((BytesRef) original).utf8ToString();
        } else {
            otherMissing = original;
        }

        while (missing == null || missing.equals(otherMissing)) {
            int missingId = ESTestCase.randomIntBetween(0, 3);
            switch (missingId) {
                case 0:
                    missing = ("_last");
                    break;
                case 1:
                    missing = ("_first");
                    break;
                case 2:
                    missing = ESTestCase.randomAsciiOfLength(10);
                    break;
                case 3:
                    missing = ESTestCase.randomInt();
                    break;
                default:
                    throw new IllegalStateException("Unknown missing type.");

            }
        }
        return missing;
    }

    public static SortOrder order(SortOrder original) {
        SortOrder order = SortOrder.ASC;
        if (order.equals(original)) {
            return SortOrder.DESC;
        } else {
            return SortOrder.ASC;
        }
    }

}
@@ -1,18 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<launchConfiguration type="org.eclipse.jdt.launching.localJavaApplication">
  <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_PATHS">
    <listEntry value="/elasticsearch/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java"/>
  </listAttribute>
  <listAttribute key="org.eclipse.debug.core.MAPPED_RESOURCE_TYPES">
    <listEntry value="1"/>
  </listAttribute>
  <listAttribute key="org.eclipse.debug.ui.favoriteGroups">
    <listEntry value="org.eclipse.debug.ui.launchGroup.debug"/>
    <listEntry value="org.eclipse.debug.ui.launchGroup.run"/>
  </listAttribute>
  <booleanAttribute key="org.eclipse.jdt.launching.ATTR_USE_START_ON_FIRST_THREAD" value="true"/>
  <stringAttribute key="org.eclipse.jdt.launching.MAIN_TYPE" value="org.elasticsearch.bootstrap.Elasticsearch"/>
  <stringAttribute key="org.eclipse.jdt.launching.PROGRAM_ARGUMENTS" value="start"/>
  <stringAttribute key="org.eclipse.jdt.launching.PROJECT_ATTR" value="elasticsearch"/>
  <stringAttribute key="org.eclipse.jdt.launching.VM_ARGUMENTS" value="-Xms256m -Xmx1g -Djava.awt.headless=true -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=logs/heapdump.hprof -Delasticsearch -Des.foreground=yes -ea -Des.path.home=target/eclipse_run -Des.security.manager.enabled=false -Des.node.attr=test"/>
</launchConfiguration>
@@ -1,16 +0,0 @@
Licensed to Elasticsearch under one or more contributor
license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright
ownership. Elasticsearch licenses this file to you under
the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License.
You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
@@ -1,13 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<additionalHeaders>
  <javadoc_style>
    <firstLine>/*</firstLine>
    <beforeEachLine> * </beforeEachLine>
    <endLine> */</endLine>
    <!--skipLine></skipLine-->
    <firstLineDetectionPattern>(\s|\t)*/\*.*$</firstLineDetectionPattern>
    <lastLineDetectionPattern>.*\*/(\s|\t)*$</lastLineDetectionPattern>
    <allowBlankLines>false</allowBlankLines>
    <isMultiline>true</isMultiline>
  </javadoc_style>
</additionalHeaders>
@@ -10,7 +10,7 @@ The queries in this group are:
 <<java-query-dsl-geo-shape-query,`geo_shape`>> query::

 Find document with geo-shapes which either intersect, are contained by, or
-do not interesect with the specified geo-shape.
+do not intersect with the specified geo-shape.

 <<java-query-dsl-geo-bounding-box-query,`geo_bounding_box`>> query::

@@ -32,7 +32,7 @@ to your classpath in order to use this type:

 [source,java]
 --------------------------------------------------
-// Import ShapeRelationn and ShapeBuilder
+// Import ShapeRelation and ShapeBuilder
 import org.elasticsearch.common.geo.ShapeRelation;
 import org.elasticsearch.common.geo.builders.ShapeBuilder;
 --------------------------------------------------
@@ -10,12 +10,16 @@ These examples provide the bare bones needed to get started. For more
 information about how to write a plugin, we recommend looking at the plugins
 listed in this documentation for inspiration.

+[float]
+=== Plugin Structure
+
+All plugin files must be contained in a directory called `elasticsearch`.
+
 [float]
 === Plugin descriptor file

-All plugins, be they site or Java plugins, must contain a file called
-`plugin-descriptor.properties` in the root directory. The format for this file
-is described in detail here:
+All plugins must contain a file called `plugin-descriptor.properties` in the folder named `elasticsearch`. The format
+for this file is described in detail here:

 https://github.com/elastic/elasticsearch/blob/master/buildSrc/src/main/resources/plugin-descriptor.properties[`/buildSrc/src/main/resources/plugin-descriptor.properties`].

@@ -50,7 +54,7 @@ of nonnegative decimal integers separated by "."'s and may have leading zeros.

 |=======================================================================

-Note that only jar files in the root directory are added to the classpath for the plugin!
+Note that only jar files in the 'elasticsearch' directory are added to the classpath for the plugin!
 If you need other resources, package them into a resources jar.

 [IMPORTANT]
@@ -0,0 +1,38 @@
[[ingest-attachment]]
== Ingest Attachment Processor Plugin

The ingest attachment plugin lets Elasticsearch extract file attachments in common formats (such as PPT, XLS, PDF)
using the Apache text extraction library http://lucene.apache.org/tika/[Tika].

It can be used as a replacement for the mapper attachment plugin.

The source field must be a base64 encoded binary.

[[ingest-attachment-options]]
.Attachment options
[options="header"]
|======
| Name            | Required | Default    | Description
| `source_field`  | yes      | -          | The field to get the base64 encoded field from
| `target_field`  | no       | attachment | The field that will hold the attachment information
| `indexed_chars` | no       | 100000     | The number of chars being used for extraction to prevent huge fields. Use `-1` for no limit.
| `fields`        | no       | all        | Properties to select to be stored, can be `content`, `title`, `name`, `author`, `keywords`, `date`, `content_type`, `content_length`, `language`
|======

[source,js]
--------------------------------------------------
{
  "description" : "...",
  "processors" : [
    {
      "attachment" : {
        "source_field" : "data"
      }
    }
  ]
}
--------------------------------------------------

NOTE: Extracting contents from binary data is a resource intensive operation and
consumes a lot of resources. It is highly recommended to run pipelines
using this processor in a dedicated ingest node.
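For orientation, the snippet above only defines the processor itself. A minimal end-to-end sketch, assuming the plugin is installed and using a hypothetical pipeline id `attachment` and index name `my_index` (both illustrative, not part of this commit; the `pipeline` URL parameter follows the ingest API and its exact name may differ on this branch):

[source,js]
--------------------------------------------------
PUT _ingest/pipeline/attachment
{
  "description" : "Extract attachment information",
  "processors" : [
    {
      "attachment" : {
        "source_field" : "data"
      }
    }
  ]
}

PUT my_index/my_type/1?pipeline=attachment
{
  "data" : "e1xydGYxXGFuc2kNCkxvcmVtIGlwc3VtIGRvbG9yIHNpdCBhbWV0DQpccGFyIH0="
}
--------------------------------------------------

The `data` value here is a base64 encoded RTF document, matching the requirement above that the source field be base64 encoded binary.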
@@ -43,7 +43,7 @@ releases 2.0 and later do not support rivers.
 * https://www.elastic.co/guide/en/logstash/current/plugins-inputs-elasticsearch.html[Elasticsearch input to Logstash]
   The Logstash `elasticsearch` input plugin.
 * https://www.elastic.co/guide/en/logstash/current/plugins-filters-elasticsearch.html[Elasticsearch event filtering in Logstash]
-  The Logstash `elasticearch` filter plugin.
+  The Logstash `elasticsearch` filter plugin.
 * https://www.elastic.co/guide/en/logstash/current/plugins-codecs-es_bulk.html[Elasticsearch bulk codec]
   The Logstash `es_bulk` plugin decodes the Elasticsearch bulk format into individual events.

@@ -2,7 +2,7 @@
 === Range Aggregation

 A multi-bucket value source based aggregation that enables the user to define a set of ranges - each representing a bucket. During the aggregation process, the values extracted from each document will be checked against each bucket range and "bucket" the relevant/matching document.
-Note that this aggregration includes the `from` value and excludes the `to` value for each range.
+Note that this aggregation includes the `from` value and excludes the `to` value for each range.

 Example:
@@ -77,7 +77,7 @@ tags of the issues the user has commented on:
 }
 --------------------------------------------------

-As you can see above, the the `reverse_nested` aggregation is put in to a `nested` aggregation as this is the only place
+As you can see above, the `reverse_nested` aggregation is put in to a `nested` aggregation as this is the only place
 in the dsl where the `reversed_nested` aggregation can be used. Its sole purpose is to join back to a parent doc higher
 up in the nested structure.

@@ -34,7 +34,7 @@ Credits for the hyphenation code go to the Apache FOP project .
 [float]
 === Dictionary decompounder

-The `dictionary_decompounder` uses a brute force approach in conjuction with
+The `dictionary_decompounder` uses a brute force approach in conjunction with
 only the word dictionary to find subwords in a compound word. It is much
 slower than the hyphenation decompounder but can be used as a first start to
 check the quality of your dictionary.
@@ -16,7 +16,7 @@ attribute as follows:
 ------------------------
 bin/elasticsearch --node.rack rack1 --node.size big  <1>
 ------------------------
-<1> These attribute settings can also be specfied in the `elasticsearch.yml` config file.
+<1> These attribute settings can also be specified in the `elasticsearch.yml` config file.

 These metadata attributes can be used with the
 `index.routing.allocation.*` settings to allocate an index to a particular
@@ -186,7 +186,7 @@ Here is what it looks like when one shard group failed due to pending operations
 }
 --------------------------------------------------

-NOTE: The above error is shown when the synced flush failes due to concurrent indexing operations. The HTTP
+NOTE: The above error is shown when the synced flush fails due to concurrent indexing operations. The HTTP
 status code in that case will be `409 CONFLICT`.

 Sometimes the failures are specific to a shard copy. The copies that failed will not be eligible for
@@ -3,7 +3,7 @@

 Provides store information for shard copies of indices.
 Store information reports on which nodes shard copies exist, the shard
-copy allocation ID, a unique identifer for each shard copy, and any exceptions
+copy allocation ID, a unique identifier for each shard copy, and any exceptions
 encountered while opening the shard index or from earlier engine failure.

 By default, only lists store information for shards that have at least one
@@ -61,7 +61,7 @@ All processors are defined in the following way within a pipeline definition:
 Each processor defines its own configuration parameters, but all processors have
 the ability to declare `tag` and `on_failure` fields. These fields are optional.

-A `tag` is simply a string identifier of the specific instatiation of a certain
+A `tag` is simply a string identifier of the specific instantiation of a certain
 processor in a pipeline. The `tag` field does not affect any processor's behavior,
 but is very useful for bookkeeping and tracing errors to specific processors.
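For illustration (this sketch is not part of the commit's diff): a `set` processor declaring both optional fields. The field names and values inside `set` are assumptions chosen for the example; the idea is that if the tagged processor fails, the processors listed under `on_failure` run instead of the failure aborting the pipeline, and the `tag` identifies the failing processor in error traces.

[source,js]
--------------------------------------------------
{
  "set" : {
    "tag" : "set-status",
    "field" : "status",
    "value" : "ok",
    "on_failure" : [
      {
        "set" : {
          "field" : "error",
          "value" : "failed to set status"
        }
      }
    ]
  }
}
--------------------------------------------------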
@@ -1079,7 +1079,7 @@ response:

 It is often useful to see how each processor affects the ingest document
 as it is passed through the pipeline. To see the intermediate results of
-each processor in the simulat request, a `verbose` parameter may be added
+each processor in the simulate request, a `verbose` parameter may be added
 to the request

 Here is an example verbose request and its response:
@@ -24,7 +24,7 @@ GET my_index/my_type/1?routing=user1 <2>
 // AUTOSENSE

 <1> This document uses `user1` as its routing value, instead of its ID.
-<2> The the same `routing` value needs to be provided when
+<2> The same `routing` value needs to be provided when
     <<docs-get,getting>>, <<docs-delete,deleting>>, or <<docs-update,updating>>
     the document.
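As a sketch of the same rule applied to a delete, reusing the `user1` routing value from the example above (illustrative, not part of this commit's diff):

[source,js]
--------------------------------------------------
DELETE my_index/my_type/1?routing=user1
--------------------------------------------------

Without the `routing` value, the request would be routed by the document ID and could miss the shard that actually holds the document.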
@@ -93,7 +93,7 @@ used for future documents.

 ==== Note on documents expiration

-Expired documents will be automatically deleted periodoically. The following
+Expired documents will be automatically deleted periodically. The following
 settings control the expiry process:

 `indices.ttl.interval`::

@@ -22,7 +22,7 @@ are searchable. It accepts three values:
 This option applies only to `string` fields, for which it is the default.
 The string field value is first <<analysis,analyzed>> to convert the
 string into terms (e.g. a list of individual words), which are then
-indexed. At search time, the the query string is passed through
+indexed. At search time, the query string is passed through
 (<<search-analyzer,usually>>) the same analyzer to generate terms
 in the same format as those in the index. It is this process that enables
 <<full-text-queries,full text search>>.

@@ -7,7 +7,7 @@ contain sub-fields, called `properties`. These properties may be of any
 be added:

 * explicitly by defining them when <<indices-create-index,creating an index>>.
-* explicitily by defining them when adding or updating a mapping type with the <<indices-put-mapping,PUT mapping>> API.
+* explicitly by defining them when adding or updating a mapping type with the <<indices-put-mapping,PUT mapping>> API.
 * <<dynamic-mapping,dynamically>> just by indexing documents containing new fields.

 Below is an example of adding `properties` to a mapping type, an `object`

@@ -22,7 +22,7 @@ configuration are:

 `BM25`::
 The Okapi BM25 algorithm.
-See {defguide}/pluggable-similarites.html[Plugggable Similarity Algorithms]
+See {defguide}/pluggable-similarites.html[Pluggable Similarity Algorithms]
 for more information.
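As a sketch of how a configured similarity is selected (illustrative, not part of this commit's diff; the index, type, and field names are assumptions), a field opts into BM25 through the `similarity` mapping parameter:

[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "my_type": {
      "properties": {
        "title": { "type": "string", "similarity": "BM25" }
      }
    }
  }
}
--------------------------------------------------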

@@ -21,7 +21,7 @@ document:
 <<nested>>:: `nested` for arrays of JSON objects

 [float]
-=== Geo dataypes
+=== Geo datatypes

 <<geo-point>>:: `geo_point` for lat/lon points
 <<geo-shape>>:: `geo_shape` for complex shapes like polygons
@@ -9,7 +9,7 @@ Fields of type `geo_point` accept latitude-longitude pairs, which can be used:
   <<query-dsl-geohash-cell-query,geohash>> cell.
 * to aggregate documents by <<search-aggregations-bucket-geohashgrid-aggregation,geographically>>
   or by <<search-aggregations-bucket-geodistance-aggregation,distance>> from a central point.
-* to integerate distance into a document's <<query-dsl-function-score-query,relevance score>>.
+* to integrate distance into a document's <<query-dsl-function-score-query,relevance score>>.
 * to <<geo-sorting,sort>> documents by distance.

 There are four ways that a geo-point may be specified, as demonstrated below:
@@ -44,7 +44,7 @@ The <<cluster-state,`cluster_state`>>, <<cluster-nodes-info,`nodes_info`>>,
 <<cluster-nodes-stats,`nodes_stats`>> and <<indices-stats,`indices_stats`>>
 APIs have all been changed to make their format more RESTful and less clumsy.

-For instance, if you just want the `nodes` section of the the `cluster_state`,
+For instance, if you just want the `nodes` section of the `cluster_state`,
 instead of:

 [source,sh]
@@ -320,7 +320,7 @@ longer be used to return whole objects and it no longer accepts the
 parameters instead.

 * Settings, like `index.analysis.analyzer.default` are now returned as proper
-  nested JSON objects, which makes them easier to work with programatically:
+  nested JSON objects, which makes them easier to work with programmatically:
 +
 [source,js]
 ---------------
@@ -25,7 +25,7 @@ Index templates can no longer be configured on disk. Use the
 ==== Analyze API changes


-The Analyze API now returns the the `position` of the first token as `0`
+The Analyze API now returns the `position` of the first token as `0`
 instead of `1`.

 The `prefer_local` parameter has been removed. The `_analyze` API is a light
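A quick illustration of the new token numbering (not part of this commit's diff; the query-string form of the analyze API is used here, and the host is an assumption):

[source,sh]
--------------------------------------------------
curl -XGET 'localhost:9200/_analyze?analyzer=standard&text=quick+brown+fox'
--------------------------------------------------

In the response, the first token (`quick`) now carries `"position" : 0`, where it previously carried `1`.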
@@ -153,7 +153,7 @@ PUT my_index
   }
 }
 ----------------------------
-<1> These two fields cannot be distinguised as both are referred to as `foo.bar`.
+<1> These two fields cannot be distinguished as both are referred to as `foo.bar`.

 You can no longer create fields with dots in the name.
@@ -346,6 +346,8 @@ disable doc values is by using the `doc_values` property of mappings.
 === Plugin changes

 The command `bin/plugin` has been renamed to `bin/elasticsearch-plugin`.
+The structure of the plugin has changed. All the plugin files must be contained in a directory called `elasticsearch`.
+If you use the gradle build, this structure is automatically generated.
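As a sketch of the rename, with the `analysis-icu` plugin standing in as an example (the plugin name is illustrative, not part of this commit's diff):

[source,sh]
--------------------------------------------------
# before this change
bin/plugin install analysis-icu

# after this change
bin/elasticsearch-plugin install analysis-icu
--------------------------------------------------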

 ==== Site plugins removed

@@ -550,7 +552,7 @@ Removing individual setters for lon() and lat() values, both values should be se
 Removing setters for to(Object ...) and from(Object ...) in favour of the only two allowed input
 arguments (String, Number). Removing setter for center point (point(), geohash()) because parameter
 is mandatory and should already be set in constructor.
-Also removing setters for lt(), lte(), gt(), gte() since they can all be replaced by equivallent
+Also removing setters for lt(), lte(), gt(), gte() since they can all be replaced by equivalent
 calls to to/from() and inludeLower()/includeUpper().
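The same constructor-mandatory pattern appears in the geo distance sort changes earlier in this diff; as a sketch of the before/after shape of the Java API (variable names are illustrative):

[source,java]
--------------------------------------------------
// before: the center point could be set through a setter after construction
GeoDistanceSortBuilder before = new GeoDistanceSortBuilder("location");
before.point(40.7143528, -74.0059731);

// after: the point is mandatory and is passed to the constructor
GeoDistanceSortBuilder after = new GeoDistanceSortBuilder("location", 40.7143528, -74.0059731);
--------------------------------------------------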

 ==== GeoPolygonQueryBuilder
Some files were not shown because too many files have changed in this diff.