Merge branch 'master' into feature/sql_2

Original commit: elastic/x-pack-elasticsearch@6ba2651f46
Nik Everett 2018-02-01 17:21:37 -05:00
commit f66e01369a
77 changed files with 372 additions and 5140 deletions


@@ -1,27 +1,17 @@
[[elasticsearch-reference]]
= Elasticsearch Reference
:include-xpack: true
:xes-repo-dir: {docdir}
:es-repo-dir: {docdir}/../../../../elasticsearch/docs
:es-test-dir: {docdir}/../../../../elasticsearch/docs/src/test
:plugins-examples-dir: {docdir}/../../../../elasticsearch/plugins/examples
include::{es-repo-dir}/Versions.asciidoc[]
include::{es-repo-dir}/reference/index-shared1.asciidoc[]
include::{es-repo-dir}/index-shared1.asciidoc[]
:edit_url!:
include::setup/setup-xes.asciidoc[]
:edit_url:
include::{es-repo-dir}/reference/index-shared2.asciidoc[]
include::{es-repo-dir}/index-shared2.asciidoc[]
:edit_url!:
include::release-notes/xpack-breaking.asciidoc[]
:edit_url:
include::{es-repo-dir}/reference/index-shared3.asciidoc[]
include::{es-repo-dir}/index-shared3.asciidoc[]
:edit_url!:
include::sql/index.asciidoc[]
@@ -36,10 +26,10 @@ include::rest-api/index.asciidoc[]
include::commands/index.asciidoc[]
:edit_url:
include::{es-repo-dir}/reference/index-shared4.asciidoc[]
include::{es-repo-dir}/index-shared4.asciidoc[]
:edit_url!:
include::release-notes/xpack-xes.asciidoc[]
:edit_url:
include::{es-repo-dir}/reference/index-shared5.asciidoc[]
include::{es-repo-dir}/index-shared5.asciidoc[]


@@ -1,6 +1,6 @@
distributionUrl=https\://services.gradle.org/distributions/gradle-4.5-all.zip
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-4.3-all.zip
distributionSha256Sum=b3afcc2d5aaf4d23eeab2409d64c54046147322d05acc7fb5a63f84d8a2b8bd7
zipStoreBase=GRADLE_USER_HOME
distributionSha256Sum=6ac2f8f9302f50241bf14cc5f4a3d88504ad20e61bb98c5fd048f7723b61397e


@@ -1,5 +1,3 @@
import com.carrotsearch.gradle.junit4.RandomizedTestingTask
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.MavenFilteringHack
import java.nio.file.Files
@@ -154,25 +152,5 @@ thirdPartyAudit.excludes = [
// xpack modules are installed in real clusters as the meta plugin, so
// installing them as individual plugins for integ tests doesn't make sense,
// so we disable integ tests
// so we disable integ tests and there are no integ tests in xpack core module
integTest.enabled = false
// Instead we create a separate task to run the
// tests based on ESIntegTestCase
task internalClusterTest(type: RandomizedTestingTask,
group: JavaBasePlugin.VERIFICATION_GROUP,
description: 'Multi-node tests',
dependsOn: test.dependsOn) {
configure(BuildPlugin.commonTestConfig(project))
classpath = project.test.classpath
testClassesDir = project.test.testClassesDir
include '**/*IT.class'
systemProperty 'es.set.netty.runtime.available.processors', 'false'
}
check.dependsOn internalClusterTest
internalClusterTest.mustRunAfter test
// also add an "alias" task to make typing on the command line easier
task icTest {
dependsOn internalClusterTest
}


@@ -85,13 +85,13 @@ import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage;
import org.elasticsearch.xpack.core.persistent.CompletionPersistentTaskAction;
import org.elasticsearch.xpack.core.persistent.PersistentTaskParams;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksNodeService;
import org.elasticsearch.xpack.core.persistent.RemovePersistentTaskAction;
import org.elasticsearch.xpack.core.persistent.StartPersistentTaskAction;
import org.elasticsearch.xpack.core.persistent.UpdatePersistentTaskStatusAction;
import org.elasticsearch.persistent.CompletionPersistentTaskAction;
import org.elasticsearch.persistent.PersistentTaskParams;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksNodeService;
import org.elasticsearch.persistent.RemovePersistentTaskAction;
import org.elasticsearch.persistent.StartPersistentTaskAction;
import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction;
import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage;
import org.elasticsearch.xpack.core.security.SecurityField;
import org.elasticsearch.xpack.core.security.SecuritySettings;


@@ -35,8 +35,8 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.ml.utils.NameResolver;
import org.elasticsearch.xpack.core.ml.utils.ToXContentParams;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.Collection;


@@ -26,7 +26,7 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.xpack.core.ml.MachineLearningField;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTaskParams;
import org.elasticsearch.persistent.PersistentTaskParams;
import java.io.IOException;
import java.util.Objects;


@@ -27,7 +27,7 @@ import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTaskParams;
import org.elasticsearch.persistent.PersistentTaskParams;
import java.io.IOException;
import java.util.Objects;


@@ -14,7 +14,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.xpack.core.ml.action.OpenJobAction;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.Objects;


@@ -5,6 +5,8 @@
*/
package org.elasticsearch.xpack.core.ml.utils.time;
import org.elasticsearch.cli.SuppressForbidden;
import java.time.DateTimeException;
import java.time.Instant;
import java.time.LocalDate;
@@ -84,6 +86,11 @@ public class DateTimeFormatterTimestampConverter implements TimestampConverter {
if (hasTimeZone) {
return Instant.from(parsed);
}
return toInstantUnsafelyIgnoringAmbiguity(parsed);
}
@SuppressForbidden(reason = "TODO https://github.com/elastic/x-pack-elasticsearch/issues/3810")
private Instant toInstantUnsafelyIgnoringAmbiguity(TemporalAccessor parsed) {
return LocalDateTime.from(parsed).atZone(defaultZoneId).toInstant();
}
}


@@ -1,165 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskManager;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
/**
* Represents an executor node operation that corresponds to a persistent task
*/
public class AllocatedPersistentTask extends CancellableTask {
private volatile String persistentTaskId;
private volatile long allocationId;
private final AtomicReference<State> state;
@Nullable
private volatile Exception failure;
private volatile PersistentTasksService persistentTasksService;
private volatile Logger logger;
private volatile TaskManager taskManager;
public AllocatedPersistentTask(long id, String type, String action, String description, TaskId parentTask,
Map<String, String> headers) {
super(id, type, action, description, parentTask, headers);
this.state = new AtomicReference<>(State.STARTED);
}
@Override
public boolean shouldCancelChildrenOnCancellation() {
return true;
}
// For persistent tasks we always need to return `false` here, because the parent of a persistent task is not a task
// in the task manager but an entry in the cluster state. This instructs the task manager not to try to kill this
// persistent task when it cannot find the fake parent node id "cluster" in the cluster state.
@Override
public final boolean cancelOnParentLeaving() {
return false;
}
@Override
public Status getStatus() {
return new PersistentTasksNodeService.Status(state.get());
}
/**
* Updates the persistent state for the corresponding persistent task.
* <p>
* This doesn't affect the status of this allocated task.
*/
public void updatePersistentStatus(Task.Status status, ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>> listener) {
persistentTasksService.updateStatus(persistentTaskId, allocationId, status, listener);
}
public String getPersistentTaskId() {
return persistentTaskId;
}
void init(PersistentTasksService persistentTasksService, TaskManager taskManager, Logger logger, String persistentTaskId, long
allocationId) {
this.persistentTasksService = persistentTasksService;
this.logger = logger;
this.taskManager = taskManager;
this.persistentTaskId = persistentTaskId;
this.allocationId = allocationId;
}
public Exception getFailure() {
return failure;
}
boolean markAsCancelled() {
return state.compareAndSet(AllocatedPersistentTask.State.STARTED, AllocatedPersistentTask.State.PENDING_CANCEL);
}
public State getState() {
return state.get();
}
public long getAllocationId() {
return allocationId;
}
public enum State {
STARTED, // the task is currently running
PENDING_CANCEL, // the task is cancelled on master, cancelling it locally
COMPLETED // the task is done running and trying to notify caller
}
/**
* Waits for this persistent task to have the desired state.
*/
public void waitForPersistentTaskStatus(Predicate<PersistentTasksCustomMetaData.PersistentTask<?>> predicate,
@Nullable TimeValue timeout,
PersistentTasksService.WaitForPersistentTaskStatusListener<?> listener) {
persistentTasksService.waitForPersistentTaskStatus(persistentTaskId, predicate, timeout, listener);
}
public void markAsCompleted() {
completeAndNotifyIfNeeded(null);
}
public void markAsFailed(Exception e) {
if (CancelTasksRequest.DEFAULT_REASON.equals(getReasonCancelled())) {
completeAndNotifyIfNeeded(null);
} else {
completeAndNotifyIfNeeded(e);
}
}
private void completeAndNotifyIfNeeded(@Nullable Exception failure) {
State prevState = state.getAndSet(AllocatedPersistentTask.State.COMPLETED);
if (prevState == State.COMPLETED) {
logger.warn("attempt to complete task [{}] with id [{}] in the [{}] state", getAction(), getPersistentTaskId(), prevState);
} else {
if (failure != null) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage(
"task {} failed with an exception", getPersistentTaskId()), failure);
}
try {
this.failure = failure;
if (prevState == State.STARTED) {
logger.trace("sending notification for completed task [{}] with id [{}]", getAction(), getPersistentTaskId());
persistentTasksService.sendCompletionNotification(getPersistentTaskId(), getAllocationId(), failure, new
ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
@Override
public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> persistentTask) {
logger.trace("notification for task [{}] with id [{}] was successful", getAction(),
getPersistentTaskId());
}
@Override
public void onFailure(Exception e) {
logger.warn((Supplier<?>) () ->
new ParameterizedMessage("notification for task [{}] with id [{}] failed",
getAction(), getPersistentTaskId()), e);
}
});
}
} finally {
taskManager.unregister(this);
}
}
}
}


@@ -1,178 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* Action that is used by the executor node to indicate that the persistent action finished or failed on the node and needs to be
* removed from the cluster state in case of successful completion or restarted on some other node in case of failure.
*/
public class CompletionPersistentTaskAction extends Action<CompletionPersistentTaskAction.Request,
PersistentTaskResponse,
CompletionPersistentTaskAction.RequestBuilder> {
public static final CompletionPersistentTaskAction INSTANCE = new CompletionPersistentTaskAction();
public static final String NAME = "cluster:admin/persistent/completion";
private CompletionPersistentTaskAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public PersistentTaskResponse newResponse() {
return new PersistentTaskResponse();
}
public static class Request extends MasterNodeRequest<Request> {
private String taskId;
private Exception exception;
private long allocationId = -1;
public Request() {
}
public Request(String taskId, long allocationId, Exception exception) {
this.taskId = taskId;
this.exception = exception;
this.allocationId = allocationId;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
taskId = in.readString();
allocationId = in.readLong();
exception = in.readException();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(taskId);
out.writeLong(allocationId);
out.writeException(exception);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (taskId == null) {
validationException = addValidationError("task id is missing", validationException);
}
if (allocationId < 0) {
validationException = addValidationError("allocation id is negative or missing", validationException);
}
return validationException;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(taskId, request.taskId) &&
allocationId == request.allocationId &&
Objects.equals(exception, request.exception);
}
@Override
public int hashCode() {
return Objects.hash(taskId, allocationId, exception);
}
}
public static class RequestBuilder extends MasterNodeOperationRequestBuilder<CompletionPersistentTaskAction.Request,
PersistentTaskResponse, CompletionPersistentTaskAction.RequestBuilder> {
protected RequestBuilder(ElasticsearchClient client, CompletionPersistentTaskAction action) {
super(client, action, new Request());
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, PersistentTaskResponse> {
private final PersistentTasksClusterService persistentTasksClusterService;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
PersistentTasksClusterService persistentTasksClusterService,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, CompletionPersistentTaskAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.persistentTasksClusterService = persistentTasksClusterService;
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected PersistentTaskResponse newResponse() {
return new PersistentTaskResponse();
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
// Cluster is not affected but we update persistent tasks in the cluster state metadata
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
@Override
protected final void masterOperation(final Request request, ClusterState state,
final ActionListener<PersistentTaskResponse> listener) {
persistentTasksClusterService.completePersistentTask(request.taskId, request.allocationId, request.exception,
new ActionListener<PersistentTask<?>>() {
@Override
public void onResponse(PersistentTask<?> task) {
listener.onResponse(new PersistentTaskResponse(task));
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
}
}


@@ -1,49 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
/**
* This component is responsible for execution of persistent tasks.
*
* It abstracts away the execution of tasks and greatly simplifies testing of PersistentTasksNodeService
*/
public class NodePersistentTasksExecutor {
private final ThreadPool threadPool;
public NodePersistentTasksExecutor(ThreadPool threadPool) {
this.threadPool = threadPool;
}
public <Params extends PersistentTaskParams> void executeTask(@Nullable Params params,
@Nullable Task.Status status,
AllocatedPersistentTask task,
PersistentTasksExecutor<Params> executor) {
threadPool.executor(executor.getExecutor()).execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
task.markAsFailed(e);
}
@SuppressWarnings("unchecked")
@Override
protected void doRun() throws Exception {
try {
executor.nodeOperation(task, params, status);
} catch (Exception ex) {
task.markAsFailed(ex);
}
}
});
}
}


@@ -1,16 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.xcontent.ToXContentObject;
/**
* Parameters used to start a persistent task
*/
public interface PersistentTaskParams extends NamedWriteable, ToXContentObject {
}
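
For context, a minimal sketch of what an implementation of this interface could look like; the ExampleTaskParams class, its task name and its single field are hypothetical and not part of this commit.

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
// Hypothetical example params implementation; not part of this commit.
public class ExampleTaskParams implements PersistentTaskParams {
    // assumed task name; must match the name the corresponding executor registers under
    public static final String NAME = "example/task";
    private final String jobId;
    public ExampleTaskParams(String jobId) {
        this.jobId = jobId;
    }
    public ExampleTaskParams(StreamInput in) throws IOException {
        this.jobId = in.readString();
    }
    @Override
    public String getWriteableName() {
        return NAME;
    }
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(jobId);
    }
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("job_id", jobId);
        builder.endObject();
        return builder;
    }
}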


@@ -1,58 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.Objects;
/**
* Response upon a successful start of a persistent task
*/
public class PersistentTaskResponse extends ActionResponse {
private PersistentTask<?> task;
public PersistentTaskResponse() {
super();
}
public PersistentTaskResponse(PersistentTask<?> task) {
this.task = task;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
task = in.readOptionalWriteable(PersistentTask::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalWriteable(task);
}
public PersistentTask<?> getTask() {
return task;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PersistentTaskResponse that = (PersistentTaskResponse) o;
return Objects.equals(task, that.task);
}
@Override
public int hashCode() {
return Objects.hash(task);
}
}


@@ -1,315 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.util.Objects;
/**
* Component that runs only on the master node and is responsible for assigning running tasks to nodes
*/
public class PersistentTasksClusterService extends AbstractComponent implements ClusterStateListener {
private final ClusterService clusterService;
private final PersistentTasksExecutorRegistry registry;
public PersistentTasksClusterService(Settings settings, PersistentTasksExecutorRegistry registry, ClusterService clusterService) {
super(settings);
this.clusterService = clusterService;
clusterService.addListener(this);
this.registry = registry;
}
/**
* Creates a new persistent task on the master node
*
* @param taskId the unique id of the task
* @param action the action name
* @param params the task parameters
* @param listener the listener that will be called when task is started
*/
public <Params extends PersistentTaskParams> void createPersistentTask(String taskId, String action, @Nullable Params params,
ActionListener<PersistentTask<?>> listener) {
clusterService.submitStateUpdateTask("create persistent task", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
PersistentTasksCustomMetaData.Builder builder = builder(currentState);
if (builder.hasTask(taskId)) {
throw new ResourceAlreadyExistsException("task with id {" + taskId + "} already exists");
}
validate(action, currentState, params);
final Assignment assignment;
assignment = getAssignement(action, currentState, params);
return update(currentState, builder.addTask(taskId, action, params, assignment));
}
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(e);
}
@SuppressWarnings("unchecked")
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
PersistentTasksCustomMetaData tasks = newState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
if (tasks != null) {
listener.onResponse(tasks.getTask(taskId));
} else {
listener.onResponse(null);
}
}
});
}
/**
* Marks a running persistent task as finished (successfully or with a failure) and removes its record from the cluster state
*
* @param id the id of the persistent task
* @param allocationId the allocation id of the persistent task
* @param failure the reason for restarting the task or null if the task completed successfully
* @param listener the listener that will be called when task is removed
*/
public void completePersistentTask(String id, long allocationId, Exception failure, ActionListener<PersistentTask<?>> listener) {
final String source;
if (failure != null) {
logger.warn("persistent task " + id + " failed", failure);
source = "finish persistent task (failed)";
} else {
source = "finish persistent task (success)";
}
clusterService.submitStateUpdateTask(source, new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
PersistentTasksCustomMetaData.Builder tasksInProgress = builder(currentState);
if (tasksInProgress.hasTask(id, allocationId)) {
tasksInProgress.finishTask(id);
return update(currentState, tasksInProgress);
} else {
if (tasksInProgress.hasTask(id)) {
logger.warn("The task [{}] with id [{}] was found but it has a different allocation id [{}], status is not updated",
PersistentTasksCustomMetaData.getTaskWithId(currentState, id).getTaskName(), id, allocationId);
} else {
logger.warn("The task [{}] wasn't found, status is not updated", id);
}
throw new ResourceNotFoundException("the task with id [" + id + "] and allocation id [" + allocationId + "] not found");
}
}
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(e);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
// Using old state since in the new state the task is already gone
listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(oldState, id));
}
});
}
/**
* Removes the persistent task
*
* @param id the id of a persistent task
* @param listener the listener that will be called when task is removed
*/
public void removePersistentTask(String id, ActionListener<PersistentTask<?>> listener) {
clusterService.submitStateUpdateTask("remove persistent task", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
PersistentTasksCustomMetaData.Builder tasksInProgress = builder(currentState);
if (tasksInProgress.hasTask(id)) {
return update(currentState, tasksInProgress.removeTask(id));
} else {
throw new ResourceNotFoundException("the task with id {} doesn't exist", id);
}
}
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(e);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
// Using old state since in the new state the task is already gone
listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(oldState, id));
}
});
}
/**
* Update task status
*
* @param id the id of a persistent task
* @param allocationId the expected allocation id of the persistent task
* @param status new status
* @param listener the listener that will be called when task is removed
*/
public void updatePersistentTaskStatus(String id, long allocationId, Task.Status status, ActionListener<PersistentTask<?>> listener) {
clusterService.submitStateUpdateTask("update task status", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
PersistentTasksCustomMetaData.Builder tasksInProgress = builder(currentState);
if (tasksInProgress.hasTask(id, allocationId)) {
return update(currentState, tasksInProgress.updateTaskStatus(id, status));
} else {
if (tasksInProgress.hasTask(id)) {
logger.warn("trying to update status on task {} with unexpected allocation id {}", id, allocationId);
} else {
logger.warn("trying to update status on non-existing task {}", id);
}
throw new ResourceNotFoundException("the task with id {} and allocation id {} doesn't exist", id, allocationId);
}
}
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(e);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(newState, id));
}
});
}
private <Params extends PersistentTaskParams> Assignment getAssignement(String taskName, ClusterState currentState,
@Nullable Params params) {
PersistentTasksExecutor<Params> persistentTasksExecutor = registry.getPersistentTaskExecutorSafe(taskName);
return persistentTasksExecutor.getAssignment(params, currentState);
}
private <Params extends PersistentTaskParams> void validate(String taskName, ClusterState currentState, @Nullable Params params) {
PersistentTasksExecutor<Params> persistentTasksExecutor = registry.getPersistentTaskExecutorSafe(taskName);
persistentTasksExecutor.validate(params, currentState);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.localNodeMaster()) {
logger.trace("checking task reassignment for cluster state {}", event.state().getVersion());
if (reassignmentRequired(event, this::getAssignement)) {
logger.trace("task reassignment is needed");
reassignTasks();
} else {
logger.trace("task reassignment is not needed");
}
}
}
interface ExecutorNodeDecider {
<Params extends PersistentTaskParams> Assignment getAssignment(String action, ClusterState currentState, Params params);
}
static boolean reassignmentRequired(ClusterChangedEvent event, ExecutorNodeDecider decider) {
PersistentTasksCustomMetaData tasks = event.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
PersistentTasksCustomMetaData prevTasks = event.previousState().getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
if (tasks != null && (Objects.equals(tasks, prevTasks) == false ||
event.nodesChanged() ||
event.routingTableChanged() ||
event.previousState().nodes().isLocalNodeElectedMaster() == false)) {
// We need to check if removed nodes were running any of the tasks and reassign them
boolean reassignmentRequired = false;
for (PersistentTask<?> taskInProgress : tasks.tasks()) {
if (taskInProgress.needsReassignment(event.state().nodes())) {
// there is an unassigned task or task with a disappeared node - we need to try assigning it
if (Objects.equals(taskInProgress.getAssignment(),
decider.getAssignment(taskInProgress.getTaskName(), event.state(), taskInProgress.getParams())) == false) {
// it looks like an assignment for at least one task is possible - let's trigger reassignment
reassignmentRequired = true;
break;
}
}
}
return reassignmentRequired;
}
return false;
}
/**
* Evaluates the cluster state and tries to assign tasks to nodes
*/
public void reassignTasks() {
clusterService.submitStateUpdateTask("reassign persistent tasks", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) throws Exception {
return reassignTasks(currentState, logger, PersistentTasksClusterService.this::getAssignement);
}
@Override
public void onFailure(String source, Exception e) {
logger.warn("Unsuccessful persistent task reassignment", e);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
}
static ClusterState reassignTasks(ClusterState currentState, Logger logger, ExecutorNodeDecider decider) {
PersistentTasksCustomMetaData tasks = currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
ClusterState clusterState = currentState;
DiscoveryNodes nodes = currentState.nodes();
if (tasks != null) {
logger.trace("reassigning {} persistent tasks", tasks.tasks().size());
// We need to check if removed nodes were running any of the tasks and reassign them
for (PersistentTask<?> task : tasks.tasks()) {
if (task.needsReassignment(nodes)) {
// there is an unassigned task - we need to try assigning it
Assignment assignment = decider.getAssignment(task.getTaskName(), clusterState, task.getParams());
if (Objects.equals(assignment, task.getAssignment()) == false) {
logger.trace("reassigning task {} from node {} to node {}", task.getId(),
task.getAssignment().getExecutorNode(), assignment.getExecutorNode());
clusterState = update(clusterState, builder(clusterState).reassignTask(task.getId(), assignment));
} else {
logger.trace("ignoring task {} because assignment is the same {}", task.getId(), assignment);
}
} else {
logger.trace("ignoring task {} because it is still running", task.getId());
}
}
}
return clusterState;
}
private static PersistentTasksCustomMetaData.Builder builder(ClusterState currentState) {
return PersistentTasksCustomMetaData.builder(currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE));
}
private static ClusterState update(ClusterState currentState, PersistentTasksCustomMetaData.Builder tasksInProgress) {
if (tasksInProgress.isChanged()) {
return ClusterState.builder(currentState).metaData(
MetaData.builder(currentState.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasksInProgress.build())
).build();
} else {
return currentState;
}
}
}
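
As a usage illustration only (not part of this commit), creating a task through this service might look like the following; the task id, task name, the params variable and the listener bodies are assumptions.

persistentTasksClusterService.createPersistentTask("example-task-1", "example/task", params,
        new ActionListener<PersistentTask<?>>() {
            @Override
            public void onResponse(PersistentTask<?> task) {
                // the task entry is now stored in the cluster state and the master will assign it to a node
            }
            @Override
            public void onFailure(Exception e) {
                // fails with ResourceAlreadyExistsException if a task with this id already exists
            }
        });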


@@ -1,679 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractNamedDiffable;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.NamedObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.Task.Status;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import static org.elasticsearch.cluster.metadata.MetaData.ALL_CONTEXTS;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
* A cluster state record that contains a list of all running persistent tasks
*/
public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable<MetaData.Custom> implements MetaData.Custom {
public static final String TYPE = "persistent_tasks";
private static final String API_CONTEXT = MetaData.XContentContext.API.toString();
// TODO: Implement custom Diff for tasks
private final Map<String, PersistentTask<?>> tasks;
private final long lastAllocationId;
public PersistentTasksCustomMetaData(long lastAllocationId, Map<String, PersistentTask<?>> tasks) {
this.lastAllocationId = lastAllocationId;
this.tasks = tasks;
}
private static final ObjectParser<Builder, Void> PERSISTENT_TASKS_PARSER = new ObjectParser<>(TYPE, Builder::new);
private static final ObjectParser<TaskBuilder<PersistentTaskParams>, Void> PERSISTENT_TASK_PARSER =
new ObjectParser<>("tasks", TaskBuilder::new);
public static final ConstructingObjectParser<Assignment, Void> ASSIGNMENT_PARSER =
new ConstructingObjectParser<>("assignment", objects -> new Assignment((String) objects[0], (String) objects[1]));
private static final NamedObjectParser<TaskDescriptionBuilder<PersistentTaskParams>, Void> TASK_DESCRIPTION_PARSER;
static {
// Tasks parser initialization
PERSISTENT_TASKS_PARSER.declareLong(Builder::setLastAllocationId, new ParseField("last_allocation_id"));
PERSISTENT_TASKS_PARSER.declareObjectArray(Builder::setTasks, PERSISTENT_TASK_PARSER, new ParseField("tasks"));
// Task description parser initialization
ObjectParser<TaskDescriptionBuilder<PersistentTaskParams>, String> parser = new ObjectParser<>("named");
parser.declareObject(TaskDescriptionBuilder::setParams,
(p, c) -> p.namedObject(PersistentTaskParams.class, c, null), new ParseField("params"));
parser.declareObject(TaskDescriptionBuilder::setStatus,
(p, c) -> p.namedObject(Status.class, c, null), new ParseField("status"));
TASK_DESCRIPTION_PARSER = (XContentParser p, Void c, String name) -> parser.parse(p, new TaskDescriptionBuilder<>(name), name);
// Assignment parser
ASSIGNMENT_PARSER.declareStringOrNull(constructorArg(), new ParseField("executor_node"));
ASSIGNMENT_PARSER.declareStringOrNull(constructorArg(), new ParseField("explanation"));
// Task parser initialization
PERSISTENT_TASK_PARSER.declareString(TaskBuilder::setId, new ParseField("id"));
PERSISTENT_TASK_PARSER.declareString(TaskBuilder::setTaskName, new ParseField("name"));
PERSISTENT_TASK_PARSER.declareLong(TaskBuilder::setAllocationId, new ParseField("allocation_id"));
PERSISTENT_TASK_PARSER.declareNamedObjects(
(TaskBuilder<PersistentTaskParams> taskBuilder, List<TaskDescriptionBuilder<PersistentTaskParams>> objects) -> {
if (objects.size() != 1) {
throw new IllegalArgumentException("only one task description per task is allowed");
}
TaskDescriptionBuilder<PersistentTaskParams> builder = objects.get(0);
taskBuilder.setTaskName(builder.taskName);
taskBuilder.setParams(builder.params);
taskBuilder.setStatus(builder.status);
}, TASK_DESCRIPTION_PARSER, new ParseField("task"));
PERSISTENT_TASK_PARSER.declareObject(TaskBuilder::setAssignment, ASSIGNMENT_PARSER, new ParseField("assignment"));
PERSISTENT_TASK_PARSER.declareLong(TaskBuilder::setAllocationIdOnLastStatusUpdate,
new ParseField("allocation_id_on_last_status_update"));
}
/**
* Private builder used in XContent parser to build task-specific portion (params and status)
*/
private static class TaskDescriptionBuilder<Params extends PersistentTaskParams> {
private final String taskName;
private Params params;
private Status status;
private TaskDescriptionBuilder(String taskName) {
this.taskName = taskName;
}
private TaskDescriptionBuilder setParams(Params params) {
this.params = params;
return this;
}
private TaskDescriptionBuilder setStatus(Status status) {
this.status = status;
return this;
}
}
public Collection<PersistentTask<?>> tasks() {
return this.tasks.values();
}
public Map<String, PersistentTask<?>> taskMap() {
return this.tasks;
}
public PersistentTask<?> getTask(String id) {
return this.tasks.get(id);
}
public Collection<PersistentTask<?>> findTasks(String taskName, Predicate<PersistentTask<?>> predicate) {
return this.tasks().stream()
.filter(p -> taskName.equals(p.getTaskName()))
.filter(predicate)
.collect(Collectors.toList());
}
public boolean tasksExist(String taskName, Predicate<PersistentTask<?>> predicate) {
return this.tasks().stream()
.filter(p -> taskName.equals(p.getTaskName()))
.anyMatch(predicate);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PersistentTasksCustomMetaData that = (PersistentTasksCustomMetaData) o;
return lastAllocationId == that.lastAllocationId &&
Objects.equals(tasks, that.tasks);
}
@Override
public int hashCode() {
return Objects.hash(tasks, lastAllocationId);
}
@Override
public String toString() {
return Strings.toString(this);
}
public long getNumberOfTasksOnNode(String nodeId, String taskName) {
return tasks.values().stream().filter(
task -> taskName.equals(task.taskName) && nodeId.equals(task.assignment.executorNode)).count();
}
@Override
public Version getMinimalSupportedVersion() {
return Version.V_5_4_0;
}
@Override
public EnumSet<MetaData.XContentContext> context() {
return ALL_CONTEXTS;
}
public static PersistentTasksCustomMetaData fromXContent(XContentParser parser) throws IOException {
return PERSISTENT_TASKS_PARSER.parse(parser, null).build();
}
@SuppressWarnings("unchecked")
public static <Params extends PersistentTaskParams> PersistentTask<Params> getTaskWithId(ClusterState clusterState, String taskId) {
PersistentTasksCustomMetaData tasks = clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE);
if (tasks != null) {
return (PersistentTask<Params>) tasks.getTask(taskId);
}
return null;
}
public static class Assignment {
@Nullable
private final String executorNode;
private final String explanation;
public Assignment(String executorNode, String explanation) {
this.executorNode = executorNode;
assert explanation != null;
this.explanation = explanation;
}
@Nullable
public String getExecutorNode() {
return executorNode;
}
public String getExplanation() {
return explanation;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Assignment that = (Assignment) o;
return Objects.equals(executorNode, that.executorNode) &&
Objects.equals(explanation, that.explanation);
}
@Override
public int hashCode() {
return Objects.hash(executorNode, explanation);
}
public boolean isAssigned() {
return executorNode != null;
}
@Override
public String toString() {
return "node: [" + executorNode + "], explanation: [" + explanation + "]";
}
}
public static final Assignment INITIAL_ASSIGNMENT = new Assignment(null, "waiting for initial assignment");
/**
* A record that represents a single running persistent task
*/
public static class PersistentTask<P extends PersistentTaskParams> implements Writeable, ToXContentObject {
private final String id;
private final long allocationId;
private final String taskName;
@Nullable
private final P params;
@Nullable
private final Status status;
private final Assignment assignment;
@Nullable
private final Long allocationIdOnLastStatusUpdate;
public PersistentTask(String id, String taskName, P params, long allocationId, Assignment assignment) {
this(id, allocationId, taskName, params, null, assignment, null);
}
public PersistentTask(PersistentTask<P> task, long allocationId, Assignment assignment) {
this(task.id, allocationId, task.taskName, task.params, task.status,
assignment, task.allocationId);
}
public PersistentTask(PersistentTask<P> task, Status status) {
this(task.id, task.allocationId, task.taskName, task.params, status,
task.assignment, task.allocationId);
}
private PersistentTask(String id, long allocationId, String taskName, P params,
Status status, Assignment assignment, Long allocationIdOnLastStatusUpdate) {
this.id = id;
this.allocationId = allocationId;
this.taskName = taskName;
this.params = params;
this.status = status;
this.assignment = assignment;
this.allocationIdOnLastStatusUpdate = allocationIdOnLastStatusUpdate;
if (params != null) {
if (params.getWriteableName().equals(taskName) == false) {
throw new IllegalArgumentException("params have to have the same writeable name as task. params: " +
params.getWriteableName() + " task: " + taskName);
}
}
if (status != null) {
if (status.getWriteableName().equals(taskName) == false) {
throw new IllegalArgumentException("status has to have the same writeable name as task. status: " +
status.getWriteableName() + " task: " + taskName);
}
}
}
@SuppressWarnings("unchecked")
public PersistentTask(StreamInput in) throws IOException {
id = in.readString();
allocationId = in.readLong();
taskName = in.readString();
params = (P) in.readOptionalNamedWriteable(PersistentTaskParams.class);
status = in.readOptionalNamedWriteable(Task.Status.class);
assignment = new Assignment(in.readOptionalString(), in.readString());
allocationIdOnLastStatusUpdate = in.readOptionalLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
out.writeLong(allocationId);
out.writeString(taskName);
out.writeOptionalNamedWriteable(params);
out.writeOptionalNamedWriteable(status);
out.writeOptionalString(assignment.executorNode);
out.writeString(assignment.explanation);
out.writeOptionalLong(allocationIdOnLastStatusUpdate);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PersistentTask<?> that = (PersistentTask<?>) o;
return Objects.equals(id, that.id) &&
allocationId == that.allocationId &&
Objects.equals(taskName, that.taskName) &&
Objects.equals(params, that.params) &&
Objects.equals(status, that.status) &&
Objects.equals(assignment, that.assignment) &&
Objects.equals(allocationIdOnLastStatusUpdate, that.allocationIdOnLastStatusUpdate);
}
@Override
public int hashCode() {
return Objects.hash(id, allocationId, taskName, params, status, assignment,
allocationIdOnLastStatusUpdate);
}
@Override
public String toString() {
return Strings.toString(this);
}
public String getId() {
return id;
}
public long getAllocationId() {
return allocationId;
}
public String getTaskName() {
return taskName;
}
@Nullable
public P getParams() {
return params;
}
@Nullable
public String getExecutorNode() {
return assignment.executorNode;
}
public Assignment getAssignment() {
return assignment;
}
public boolean isAssigned() {
return assignment.isAssigned();
}
/**
* Returns true if the task is unassigned or assigned to a non-existing node.
*/
public boolean needsReassignment(DiscoveryNodes nodes) {
return (assignment.isAssigned() == false || nodes.nodeExists(assignment.getExecutorNode()) == false);
}
@Nullable
public Status getStatus() {
return status;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params xParams) throws IOException {
builder.startObject();
{
builder.field("id", id);
builder.startObject("task");
{
builder.startObject(taskName);
{
if (params != null) {
builder.field("params", params, xParams);
}
if (status != null) {
builder.field("status", status, xParams);
}
}
builder.endObject();
}
builder.endObject();
if (API_CONTEXT.equals(xParams.param(MetaData.CONTEXT_MODE_PARAM, API_CONTEXT))) {
// These are transient values that shouldn't be persisted to gateway cluster state or snapshot
builder.field("allocation_id", allocationId);
builder.startObject("assignment");
{
builder.field("executor_node", assignment.executorNode);
builder.field("explanation", assignment.explanation);
}
builder.endObject();
if (allocationIdOnLastStatusUpdate != null) {
builder.field("allocation_id_on_last_status_update", allocationIdOnLastStatusUpdate);
}
}
}
builder.endObject();
return builder;
}
@Override
public boolean isFragment() {
return false;
}
}
private static class TaskBuilder<Params extends PersistentTaskParams> {
private String id;
private long allocationId;
private String taskName;
private Params params;
private Status status;
private Assignment assignment = INITIAL_ASSIGNMENT;
private Long allocationIdOnLastStatusUpdate;
public TaskBuilder<Params> setId(String id) {
this.id = id;
return this;
}
public TaskBuilder<Params> setAllocationId(long allocationId) {
this.allocationId = allocationId;
return this;
}
public TaskBuilder<Params> setTaskName(String taskName) {
this.taskName = taskName;
return this;
}
public TaskBuilder<Params> setParams(Params params) {
this.params = params;
return this;
}
public TaskBuilder<Params> setStatus(Status status) {
this.status = status;
return this;
}
public TaskBuilder<Params> setAssignment(Assignment assignment) {
this.assignment = assignment;
return this;
}
public TaskBuilder<Params> setAllocationIdOnLastStatusUpdate(Long allocationIdOnLastStatusUpdate) {
this.allocationIdOnLastStatusUpdate = allocationIdOnLastStatusUpdate;
return this;
}
public PersistentTask<Params> build() {
return new PersistentTask<>(id, allocationId, taskName, params, status,
assignment, allocationIdOnLastStatusUpdate);
}
}
@Override
public String getWriteableName() {
return TYPE;
}
public PersistentTasksCustomMetaData(StreamInput in) throws IOException {
lastAllocationId = in.readLong();
tasks = in.readMap(StreamInput::readString, PersistentTask::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeLong(lastAllocationId);
out.writeMap(tasks, StreamOutput::writeString, (stream, value) -> value.writeTo(stream));
}
public static NamedDiff<MetaData.Custom> readDiffFrom(StreamInput in) throws IOException {
return readDiffFrom(MetaData.Custom.class, TYPE, in);
}
public long getLastAllocationId() {
return lastAllocationId;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("last_allocation_id", lastAllocationId);
builder.startArray("tasks");
for (PersistentTask<?> entry : tasks.values()) {
entry.toXContent(builder, params);
}
builder.endArray();
return builder;
}
public static Builder builder() {
return new Builder();
}
public static Builder builder(PersistentTasksCustomMetaData tasks) {
return new Builder(tasks);
}
public static class Builder {
private final Map<String, PersistentTask<?>> tasks = new HashMap<>();
private long lastAllocationId;
private boolean changed;
private Builder() {
}
private Builder(PersistentTasksCustomMetaData tasksInProgress) {
if (tasksInProgress != null) {
tasks.putAll(tasksInProgress.tasks);
lastAllocationId = tasksInProgress.lastAllocationId;
} else {
lastAllocationId = 0;
}
}
public long getLastAllocationId() {
return lastAllocationId;
}
private Builder setLastAllocationId(long currentId) {
this.lastAllocationId = currentId;
return this;
}
private <Params extends PersistentTaskParams> Builder setTasks(List<TaskBuilder<Params>> tasks) {
for (TaskBuilder builder : tasks) {
PersistentTask<?> task = builder.build();
this.tasks.put(task.getId(), task);
}
return this;
}
private long getNextAllocationId() {
lastAllocationId++;
return lastAllocationId;
}
/**
* Adds a new task to the builder
* <p>
* After the task is added, its allocation id can be found by calling the {@link #getLastAllocationId()} method.
*/
public <Params extends PersistentTaskParams> Builder addTask(String taskId, String taskName, Params params,
Assignment assignment) {
changed = true;
PersistentTask<?> previousTask = tasks.put(taskId, new PersistentTask<>(taskId, taskName, params,
getNextAllocationId(), assignment));
if (previousTask != null) {
throw new ResourceAlreadyExistsException("Trying to override task with id {" + taskId + "}");
}
return this;
}
/**
* Reassigns the task to another node
*/
public Builder reassignTask(String taskId, Assignment assignment) {
PersistentTask<?> taskInProgress = tasks.get(taskId);
if (taskInProgress != null) {
changed = true;
tasks.put(taskId, new PersistentTask<>(taskInProgress, getNextAllocationId(), assignment));
} else {
throw new ResourceNotFoundException("cannot reassign task with id {" + taskId + "}, the task no longer exits");
}
return this;
}
/**
* Updates the task status
*/
public Builder updateTaskStatus(String taskId, Status status) {
PersistentTask<?> taskInProgress = tasks.get(taskId);
if (taskInProgress != null) {
changed = true;
tasks.put(taskId, new PersistentTask<>(taskInProgress, status));
} else {
throw new ResourceNotFoundException("cannot update task with id {" + taskId + "}, the task no longer exits");
}
return this;
}
/**
* Removes the task
*/
public Builder removeTask(String taskId) {
if (tasks.remove(taskId) != null) {
changed = true;
} else {
throw new ResourceNotFoundException("cannot remove task with id {" + taskId + "}, the task no longer exits");
}
return this;
}
/**
* Finishes the task
* <p>
* If the task is marked with removeOnCompletion flag, it is removed from the list, otherwise it is stopped.
*/
public Builder finishTask(String taskId) {
PersistentTask<?> taskInProgress = tasks.get(taskId);
if (taskInProgress != null) {
changed = true;
tasks.remove(taskId);
} else {
throw new ResourceNotFoundException("cannot finish task with id {" + taskId + "}, the task no longer exits");
}
return this;
}
/**
* Checks if the task is currently present in the list
*/
public boolean hasTask(String taskId) {
return tasks.containsKey(taskId);
}
/**
* Checks if the task is currently present in the list and has the right allocation id
*/
public boolean hasTask(String taskId, long allocationId) {
PersistentTask<?> taskInProgress = tasks.get(taskId);
if (taskInProgress != null) {
return taskInProgress.getAllocationId() == allocationId;
}
return false;
}
Set<String> getCurrentTaskIds() {
return tasks.keySet();
}
/**
* Returns true if the task list was changed since the builder was created
*/
public boolean isChanged() {
return changed;
}
public PersistentTasksCustomMetaData build() {
return new PersistentTasksCustomMetaData(lastAllocationId, Collections.unmodifiableMap(tasks));
}
}
}
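
A brief sketch (not part of this commit) of how the Builder above is typically used; the task id and task name are hypothetical and the params argument is left null for simplicity.

PersistentTasksCustomMetaData.Builder builder = PersistentTasksCustomMetaData.builder();
// registers the task and gives it the next allocation id
builder.addTask("example-task-1", "example/task", null, PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT);
long allocationId = builder.getLastAllocationId();
// ... later, when the task completes, its record is removed
builder.finishTask("example-task-1");
PersistentTasksCustomMetaData tasks = builder.build();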


@@ -1,114 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.util.Map;
import java.util.function.Predicate;
/**
* An executor of tasks that can survive a restart of the requesting or executing node.
* These tasks use the cluster state rather than only the transport service to send requests and responses.
*/
public abstract class PersistentTasksExecutor<Params extends PersistentTaskParams> extends AbstractComponent {
private final String executor;
private final String taskName;
protected PersistentTasksExecutor(Settings settings, String taskName, String executor) {
super(settings);
this.taskName = taskName;
this.executor = executor;
}
public String getTaskName() {
return taskName;
}
public static final Assignment NO_NODE_FOUND = new Assignment(null, "no appropriate nodes found for the assignment");
/**
* Returns the node id where the task with the given params has to be executed.
* <p>
* The default implementation returns the least loaded data node
*/
public Assignment getAssignment(Params params, ClusterState clusterState) {
DiscoveryNode discoveryNode = selectLeastLoadedNode(clusterState, DiscoveryNode::isDataNode);
if (discoveryNode == null) {
return NO_NODE_FOUND;
} else {
return new Assignment(discoveryNode.getId(), "");
}
}
/**
* Finds the least loaded node that satisfies the selector criteria
*/
protected DiscoveryNode selectLeastLoadedNode(ClusterState clusterState, Predicate<DiscoveryNode> selector) {
long minLoad = Long.MAX_VALUE;
DiscoveryNode minLoadedNode = null;
PersistentTasksCustomMetaData persistentTasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
for (DiscoveryNode node : clusterState.getNodes()) {
if (selector.test(node)) {
if (persistentTasks == null) {
// We don't have any task running yet, pick the first available node
return node;
}
long numberOfTasks = persistentTasks.getNumberOfTasksOnNode(node.getId(), taskName);
if (minLoad > numberOfTasks) {
minLoad = numberOfTasks;
minLoadedNode = node;
}
}
}
return minLoadedNode;
}
/**
* Checks the current cluster state for compatibility with the params
* <p>
* Throws an exception if the supplied params cannot be executed on the cluster in the current state.
*/
public void validate(Params params, ClusterState clusterState) {
}
/**
* Creates an AllocatedPersistentTask for communicating with the task manager
*/
protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId,
PersistentTask<Params> taskInProgress, Map<String, String> headers) {
return new AllocatedPersistentTask(id, type, action, getDescription(taskInProgress), parentTaskId, headers);
}
/**
* Returns task description that will be available via task manager
*/
protected String getDescription(PersistentTask<Params> taskInProgress) {
return "id=" + taskInProgress.getId();
}
/**
* This operation will be executed on the executor node.
* <p>
* NOTE: The nodeOperation has to either throw an exception, or call task.markAsCompleted() or task.completeAndNotifyIfNeeded() to
* indicate that the persistent task has finished.
*/
protected abstract void nodeOperation(AllocatedPersistentTask task, @Nullable Params params, @Nullable Task.Status status);
public String getExecutor() {
return executor;
}
}
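// Illustrative sketch: a minimal executor subclass showing the contract above: pick a node in
// getAssignment() and finish the task from nodeOperation() via markAsCompleted()/markAsFailed().
// "SampleParams" (a hypothetical PersistentTaskParams implementation), the task name string and
// the thread pool name are assumptions made for this sketch only.
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
class SampleTasksExecutor extends PersistentTasksExecutor<SampleParams> {
    SampleTasksExecutor(Settings settings) {
        super(settings, "cluster:admin/sample_task", ThreadPool.Names.GENERIC);
    }
    @Override
    public Assignment getAssignment(SampleParams params, ClusterState clusterState) {
        // run on the least loaded master-eligible node instead of the default data-node selection
        DiscoveryNode node = selectLeastLoadedNode(clusterState, DiscoveryNode::isMasterNode);
        return node == null ? NO_NODE_FOUND : new Assignment(node.getId(), "sample assignment");
    }
    @Override
    protected void nodeOperation(AllocatedPersistentTask task, SampleParams params, Task.Status status) {
        try {
            // ... the actual long-running work goes here ...
            task.markAsCompleted(); // tells the master the task is done so it can be removed from the cluster state
        } catch (Exception e) {
            task.markAsFailed(e);
        }
    }
}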

View File

@ -1,41 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
* Components that registers all persistent task executors
*/
public class PersistentTasksExecutorRegistry extends AbstractComponent {
private final Map<String, PersistentTasksExecutor<?>> taskExecutors;
@SuppressWarnings("unchecked")
public PersistentTasksExecutorRegistry(Settings settings, Collection<PersistentTasksExecutor<?>> taskExecutors) {
super(settings);
Map<String, PersistentTasksExecutor<?>> map = new HashMap<>();
for (PersistentTasksExecutor<?> executor : taskExecutors) {
map.put(executor.getTaskName(), executor);
}
this.taskExecutors = Collections.unmodifiableMap(map);
}
@SuppressWarnings("unchecked")
public <Params extends PersistentTaskParams> PersistentTasksExecutor<Params> getPersistentTaskExecutorSafe(String taskName) {
PersistentTasksExecutor<Params> executor = (PersistentTasksExecutor<Params>) taskExecutors.get(taskName);
if (executor == null) {
throw new IllegalStateException("Unknown persistent executor [" + taskName + "]");
}
return executor;
}
}
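// Illustrative sketch: wiring the registry above from a list of executors and resolving one by
// task name, reusing the hypothetical SampleTasksExecutor / "cluster:admin/sample_task" names
// from the earlier sketch.
import java.util.Collections;
import org.elasticsearch.common.settings.Settings;
class RegistrySketch {
    static PersistentTasksExecutor<SampleParams> resolveSampleExecutor(Settings settings) {
        PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(
                settings, Collections.<PersistentTasksExecutor<?>>singletonList(new SampleTasksExecutor(settings)));
        // throws IllegalStateException if nothing was registered under this task name
        return registry.getPersistentTaskExecutorSafe("cluster:admin/sample_task");
    }
}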

View File

@ -1,265 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskAwareRequest;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import static java.util.Objects.requireNonNull;
/**
* This component is responsible for coordination of execution of persistent tasks on individual nodes. It runs on all
* non-transport client nodes in the cluster and monitors cluster state changes to detect started commands.
*/
public class PersistentTasksNodeService extends AbstractComponent implements ClusterStateListener {
private final Map<Long, AllocatedPersistentTask> runningTasks = new HashMap<>();
private final PersistentTasksService persistentTasksService;
private final PersistentTasksExecutorRegistry persistentTasksExecutorRegistry;
private final TaskManager taskManager;
private final NodePersistentTasksExecutor nodePersistentTasksExecutor;
public PersistentTasksNodeService(Settings settings,
PersistentTasksService persistentTasksService,
PersistentTasksExecutorRegistry persistentTasksExecutorRegistry,
TaskManager taskManager, NodePersistentTasksExecutor nodePersistentTasksExecutor) {
super(settings);
this.persistentTasksService = persistentTasksService;
this.persistentTasksExecutorRegistry = persistentTasksExecutorRegistry;
this.taskManager = taskManager;
this.nodePersistentTasksExecutor = nodePersistentTasksExecutor;
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) {
// wait until the gateway has recovered from disk, otherwise if the only master restarts
// we start cancelling all local tasks before cluster has a chance to recover.
return;
}
PersistentTasksCustomMetaData tasks = event.state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
PersistentTasksCustomMetaData previousTasks = event.previousState().getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
// Cluster State Local State Local Action
// STARTED NULL Create as STARTED, Start
// STARTED STARTED Noop - running
// STARTED COMPLETED Noop - waiting for notification ack
// NULL NULL Noop - nothing to do
// NULL STARTED Remove locally, Mark as PENDING_CANCEL, Cancel
// NULL COMPLETED Remove locally
// Master states:
// NULL - doesn't exist in the cluster state
// STARTED - exist in the cluster state
// Local state:
// NULL - we don't have task registered locally in runningTasks
// STARTED - registered in TaskManager, requires master notification when finishes
// PENDING_CANCEL - registered in TaskManager, doesn't require master notification when finishes
// COMPLETED - not registered in TaskManager, notified, waiting for master to remove it from CS so we can remove locally
// When task finishes if it is marked as STARTED or PENDING_CANCEL it is marked as COMPLETED and unregistered,
// When task finishes if it is marked as STARTED or PENDING_CANCEL it is marked as COMPLETED and unregistered,
// If the task was STARTED, the master notification is also triggered (this is handled by the unregisterTask() method, which is
// triggered by the PersistentTaskListener)
if (Objects.equals(tasks, previousTasks) == false || event.nodesChanged()) {
// We have some changes let's check if they are related to our node
String localNodeId = event.state().getNodes().getLocalNodeId();
Set<Long> notVisitedTasks = new HashSet<>(runningTasks.keySet());
if (tasks != null) {
for (PersistentTask<?> taskInProgress : tasks.tasks()) {
if (localNodeId.equals(taskInProgress.getExecutorNode())) {
Long allocationId = taskInProgress.getAllocationId();
AllocatedPersistentTask persistentTask = runningTasks.get(allocationId);
if (persistentTask == null) {
// New task - let's start it
startTask(taskInProgress);
} else {
// The task is still running
notVisitedTasks.remove(allocationId);
}
}
}
}
for (Long id : notVisitedTasks) {
AllocatedPersistentTask task = runningTasks.get(id);
if (task.getState() == AllocatedPersistentTask.State.COMPLETED) {
// Result was sent to the caller and the caller acknowledged acceptance of the result
logger.trace("Found completed persistent task [{}] with id [{}] and allocation id [{}] - removing",
task.getAction(), task.getPersistentTaskId(), task.getAllocationId());
runningTasks.remove(id);
} else {
// task is running locally, but master doesn't know about it - that means that the persistent task was removed
// cancel the task without notifying master
logger.trace("Found unregistered persistent task [{}] with id [{}] and allocation id [{}] - cancelling",
task.getAction(), task.getPersistentTaskId(), task.getAllocationId());
cancelTask(id);
}
}
}
}
private <Params extends PersistentTaskParams> void startTask(PersistentTask<Params> taskInProgress) {
PersistentTasksExecutor<Params> executor =
persistentTasksExecutorRegistry.getPersistentTaskExecutorSafe(taskInProgress.getTaskName());
TaskAwareRequest request = new TaskAwareRequest() {
TaskId parentTaskId = new TaskId("cluster", taskInProgress.getAllocationId());
@Override
public void setParentTask(TaskId taskId) {
throw new UnsupportedOperationException("parent task if for persistent tasks shouldn't change");
}
@Override
public TaskId getParentTask() {
return parentTaskId;
}
@Override
public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
return executor.createTask(id, type, action, parentTaskId, taskInProgress, headers);
}
};
AllocatedPersistentTask task = (AllocatedPersistentTask) taskManager.register("persistent", taskInProgress.getTaskName() + "[c]",
request);
boolean processed = false;
try {
task.init(persistentTasksService, taskManager, logger, taskInProgress.getId(), taskInProgress.getAllocationId());
logger.trace("Persistent task [{}] with id [{}] and allocation id [{}] was created", task.getAction(),
task.getPersistentTaskId(), task.getAllocationId());
try {
runningTasks.put(taskInProgress.getAllocationId(), task);
nodePersistentTasksExecutor.executeTask(taskInProgress.getParams(), taskInProgress.getStatus(), task, executor);
} catch (Exception e) {
// Submit task failure
task.markAsFailed(e);
}
processed = true;
} finally {
if (processed == false) {
// something went wrong - unregistering task
logger.warn("Persistent task [{}] with id [{}] and allocation id [{}] failed to create", task.getAction(),
task.getPersistentTaskId(), task.getAllocationId());
taskManager.unregister(task);
}
}
}
/**
* Unregisters and then cancels the locally running task using the task manager. No notification to master will be sent upon
* cancellation.
*/
private void cancelTask(Long allocationId) {
AllocatedPersistentTask task = runningTasks.remove(allocationId);
if (task.markAsCancelled()) {
// Cancel the local task using the task manager
persistentTasksService.sendTaskManagerCancellation(task.getId(), new ActionListener<CancelTasksResponse>() {
@Override
public void onResponse(CancelTasksResponse cancelTasksResponse) {
logger.trace("Persistent task [{}] with id [{}] and allocation id [{}] was cancelled", task.getAction(),
task.getPersistentTaskId(), task.getAllocationId());
}
@Override
public void onFailure(Exception e) {
// There is really nothing we can do in case of failure here
logger.warn((Supplier<?>) () ->
new ParameterizedMessage("failed to cancel task [{}] with id [{}] and allocation id [{}]", task.getAction(),
task.getPersistentTaskId(), task.getAllocationId()), e);
}
});
}
}
public static class Status implements Task.Status {
public static final String NAME = "persistent_executor";
private final AllocatedPersistentTask.State state;
public Status(AllocatedPersistentTask.State state) {
this.state = requireNonNull(state, "State cannot be null");
}
public Status(StreamInput in) throws IOException {
state = AllocatedPersistentTask.State.valueOf(in.readString());
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("state", state.toString());
builder.endObject();
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(state.toString());
}
@Override
public String toString() {
return Strings.toString(this);
}
public AllocatedPersistentTask.State getState() {
return state;
}
@Override
public boolean isFragment() {
return false;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Status status = (Status) o;
return state == status.state;
}
@Override
public int hashCode() {
return Objects.hash(state);
}
}
}

View File

@ -1,184 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.util.function.Predicate;
import static org.elasticsearch.xpack.core.ClientHelper.PERSISTENT_TASK_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
/**
* This service is used by persistent actions to propagate changes in the action state and notify about completion
*/
public class PersistentTasksService extends AbstractComponent {
private final Client client;
private final ClusterService clusterService;
private final ThreadPool threadPool;
public PersistentTasksService(Settings settings, ClusterService clusterService, ThreadPool threadPool, Client client) {
super(settings);
this.client = client;
this.clusterService = clusterService;
this.threadPool = threadPool;
}
/**
* Creates the specified persistent task and attempts to assign it to a node.
*/
@SuppressWarnings("unchecked")
public <Params extends PersistentTaskParams> void startPersistentTask(String taskId, String taskName, @Nullable Params params,
ActionListener<PersistentTask<Params>> listener) {
StartPersistentTaskAction.Request createPersistentActionRequest =
new StartPersistentTaskAction.Request(taskId, taskName, params);
try {
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, StartPersistentTaskAction.INSTANCE, createPersistentActionRequest,
ActionListener.wrap(o -> listener.onResponse((PersistentTask<Params>) o.getTask()), listener::onFailure));
} catch (Exception e) {
listener.onFailure(e);
}
}
/**
* Notifies the PersistentTasksClusterService about successful (failure == null) completion of a task or its failure
*/
public void sendCompletionNotification(String taskId, long allocationId, Exception failure,
ActionListener<PersistentTask<?>> listener) {
CompletionPersistentTaskAction.Request restartRequest = new CompletionPersistentTaskAction.Request(taskId, allocationId, failure);
try {
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, CompletionPersistentTaskAction.INSTANCE, restartRequest,
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
} catch (Exception e) {
listener.onFailure(e);
}
}
/**
* Cancels a locally running task using the task manager
*/
void sendTaskManagerCancellation(long taskId, ActionListener<CancelTasksResponse> listener) {
DiscoveryNode localNode = clusterService.localNode();
CancelTasksRequest cancelTasksRequest = new CancelTasksRequest();
cancelTasksRequest.setTaskId(new TaskId(localNode.getId(), taskId));
cancelTasksRequest.setReason("persistent action was removed");
try {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), PERSISTENT_TASK_ORIGIN, cancelTasksRequest, listener,
client.admin().cluster()::cancelTasks);
} catch (Exception e) {
listener.onFailure(e);
}
}
/**
* Updates status of the persistent task.
* <p>
* Persistent task implementers shouldn't call this method directly and use
* {@link AllocatedPersistentTask#updatePersistentStatus} instead
*/
void updateStatus(String taskId, long allocationId, Task.Status status, ActionListener<PersistentTask<?>> listener) {
UpdatePersistentTaskStatusAction.Request updateStatusRequest =
new UpdatePersistentTaskStatusAction.Request(taskId, allocationId, status);
try {
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, UpdatePersistentTaskStatusAction.INSTANCE, updateStatusRequest,
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
} catch (Exception e) {
listener.onFailure(e);
}
}
/**
* Cancels if needed and removes a persistent task
*/
public void cancelPersistentTask(String taskId, ActionListener<PersistentTask<?>> listener) {
RemovePersistentTaskAction.Request removeRequest = new RemovePersistentTaskAction.Request(taskId);
try {
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, RemovePersistentTaskAction.INSTANCE, removeRequest,
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
} catch (Exception e) {
listener.onFailure(e);
}
}
/**
* Checks if the persistent task with the given id (taskId) has the desired state and, if it doesn't,
* waits for it.
*/
public void waitForPersistentTaskStatus(String taskId, Predicate<PersistentTask<?>> predicate, @Nullable TimeValue timeout,
WaitForPersistentTaskStatusListener<?> listener) {
ClusterStateObserver stateObserver = new ClusterStateObserver(clusterService, timeout, logger, threadPool.getThreadContext());
if (predicate.test(PersistentTasksCustomMetaData.getTaskWithId(stateObserver.setAndGetObservedState(), taskId))) {
listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(stateObserver.setAndGetObservedState(), taskId));
} else {
stateObserver.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(state, taskId));
}
@Override
public void onClusterServiceClose() {
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
listener.onTimeout(timeout);
}
}, clusterState -> predicate.test(PersistentTasksCustomMetaData.getTaskWithId(clusterState, taskId)));
}
}
public void waitForPersistentTasksStatus(Predicate<PersistentTasksCustomMetaData> predicate,
@Nullable TimeValue timeout, ActionListener<Boolean> listener) {
ClusterStateObserver stateObserver = new ClusterStateObserver(clusterService, timeout,
logger, threadPool.getThreadContext());
if (predicate.test(stateObserver.setAndGetObservedState().metaData().custom(PersistentTasksCustomMetaData.TYPE))) {
listener.onResponse(true);
} else {
stateObserver.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
listener.onResponse(true);
}
@Override
public void onClusterServiceClose() {
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
listener.onFailure(new IllegalStateException("timed out after " + timeout));
}
}, clusterState -> predicate.test(clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE)), timeout);
}
}
public interface WaitForPersistentTaskStatusListener<Params extends PersistentTaskParams>
extends ActionListener<PersistentTask<Params>> {
default void onTimeout(TimeValue timeout) {
onFailure(new IllegalStateException("timed out after " + timeout));
}
}
}
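// Illustrative sketch: the typical start-then-wait flow against the service above. The task id,
// task name, params type and logger are hypothetical, and error handling is reduced to logging.
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
class StartAndWaitSketch {
    static void startAndWait(PersistentTasksService service, SampleParams params, Logger logger) {
        service.startPersistentTask("sample-task-1", "cluster:admin/sample_task", params,
                ActionListener.wrap(
                        task -> logger.info("created task [{}] assigned to [{}]", task.getId(), task.getExecutorNode()),
                        e -> logger.warn("failed to create persistent task", e)));
        // wait up to 30s for the task to be assigned to a node
        service.waitForPersistentTaskStatus("sample-task-1", task -> task != null && task.isAssigned(),
                TimeValue.timeValueSeconds(30),
                new PersistentTasksService.WaitForPersistentTaskStatusListener<SampleParams>() {
                    @Override
                    public void onResponse(PersistentTask<SampleParams> task) {
                        logger.info("task [{}] is now assigned", task.getId());
                    }
                    @Override
                    public void onFailure(Exception e) {
                        logger.warn("waiting for task assignment failed", e);
                    }
                });
    }
}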

View File

@ -1,162 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.Objects;
public class RemovePersistentTaskAction extends Action<RemovePersistentTaskAction.Request,
PersistentTaskResponse,
RemovePersistentTaskAction.RequestBuilder> {
public static final RemovePersistentTaskAction INSTANCE = new RemovePersistentTaskAction();
public static final String NAME = "cluster:admin/persistent/remove";
private RemovePersistentTaskAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public PersistentTaskResponse newResponse() {
return new PersistentTaskResponse();
}
public static class Request extends MasterNodeRequest<Request> {
private String taskId;
public Request() {
}
public Request(String taskId) {
this.taskId = taskId;
}
public void setTaskId(String taskId) {
this.taskId = taskId;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
taskId = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(taskId);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(taskId, request.taskId);
}
@Override
public int hashCode() {
return Objects.hash(taskId);
}
}
public static class RequestBuilder extends MasterNodeOperationRequestBuilder<RemovePersistentTaskAction.Request,
PersistentTaskResponse, RemovePersistentTaskAction.RequestBuilder> {
protected RequestBuilder(ElasticsearchClient client, RemovePersistentTaskAction action) {
super(client, action, new Request());
}
public final RequestBuilder setTaskId(String taskId) {
request.setTaskId(taskId);
return this;
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, PersistentTaskResponse> {
private final PersistentTasksClusterService persistentTasksClusterService;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
PersistentTasksClusterService persistentTasksClusterService,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, RemovePersistentTaskAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.persistentTasksClusterService = persistentTasksClusterService;
}
@Override
protected String executor() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
protected PersistentTaskResponse newResponse() {
return new PersistentTaskResponse();
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
// Cluster is not affected but we update the persistent tasks custom metadata in the cluster state
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
@Override
protected final void masterOperation(final Request request, ClusterState state,
final ActionListener<PersistentTaskResponse> listener) {
persistentTasksClusterService.removePersistentTask(request.taskId, new ActionListener<PersistentTask<?>>() {
@Override
public void onResponse(PersistentTask<?> task) {
listener.onResponse(new PersistentTaskResponse(task));
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
}
}
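// Illustrative sketch: invoking the remove action directly through a client; in practice callers
// go through PersistentTasksService#cancelPersistentTask shown earlier. The task id and logger
// are hypothetical.
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
class RemoveTaskSketch {
    static void removeTask(Client client, String taskId, Logger logger) {
        client.execute(RemovePersistentTaskAction.INSTANCE, new RemovePersistentTaskAction.Request(taskId),
                ActionListener.wrap(
                        response -> logger.info("removed persistent task [{}]", response.getTask().getId()),
                        e -> logger.warn("failed to remove persistent task [" + taskId + "]", e)));
    }
}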

View File

@ -1,232 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* This action can be used to add the record for the persistent action to the cluster state.
*/
public class StartPersistentTaskAction extends Action<StartPersistentTaskAction.Request,
PersistentTaskResponse,
StartPersistentTaskAction.RequestBuilder> {
public static final StartPersistentTaskAction INSTANCE = new StartPersistentTaskAction();
public static final String NAME = "cluster:admin/persistent/start";
private StartPersistentTaskAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public PersistentTaskResponse newResponse() {
return new PersistentTaskResponse();
}
public static class Request extends MasterNodeRequest<Request> {
private String taskId;
@Nullable
private String taskName;
private PersistentTaskParams params;
public Request() {
}
public Request(String taskId, String taskName, PersistentTaskParams params) {
this.taskId = taskId;
this.taskName = taskName;
this.params = params;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
taskId = in.readString();
taskName = in.readString();
params = in.readOptionalNamedWriteable(PersistentTaskParams.class);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(taskId);
out.writeString(taskName);
out.writeOptionalNamedWriteable(params);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (this.taskId == null) {
validationException = addValidationError("task id must be specified", validationException);
}
if (this.taskName == null) {
validationException = addValidationError("action must be specified", validationException);
}
if (params != null) {
if (params.getWriteableName().equals(taskName) == false) {
validationException = addValidationError("params have to have the same writeable name as task. params: " +
params.getWriteableName() + " task: " + taskName, validationException);
}
}
return validationException;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request1 = (Request) o;
return Objects.equals(taskId, request1.taskId) && Objects.equals(taskName, request1.taskName) &&
Objects.equals(params, request1.params);
}
@Override
public int hashCode() {
return Objects.hash(taskId, taskName, params);
}
public String getTaskName() {
return taskName;
}
public void setTaskName(String taskName) {
this.taskName = taskName;
}
public String getTaskId() {
return taskId;
}
public void setTaskId(String taskId) {
this.taskId = taskId;
}
public PersistentTaskParams getParams() {
return params;
}
@Nullable
public void setParams(PersistentTaskParams params) {
this.params = params;
}
}
public static class RequestBuilder extends MasterNodeOperationRequestBuilder<StartPersistentTaskAction.Request,
PersistentTaskResponse, StartPersistentTaskAction.RequestBuilder> {
protected RequestBuilder(ElasticsearchClient client, StartPersistentTaskAction action) {
super(client, action, new Request());
}
public RequestBuilder setTaskId(String taskId) {
request.setTaskId(taskId);
return this;
}
public RequestBuilder setAction(String action) {
request.setTaskName(action);
return this;
}
public RequestBuilder setRequest(PersistentTaskParams params) {
request.setParams(params);
return this;
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, PersistentTaskResponse> {
private final PersistentTasksClusterService persistentTasksClusterService;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
PersistentTasksClusterService persistentTasksClusterService,
PersistentTasksExecutorRegistry persistentTasksExecutorRegistry,
PersistentTasksService persistentTasksService,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, StartPersistentTaskAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.persistentTasksClusterService = persistentTasksClusterService;
NodePersistentTasksExecutor executor = new NodePersistentTasksExecutor(threadPool);
clusterService.addListener(new PersistentTasksNodeService(settings, persistentTasksService, persistentTasksExecutorRegistry,
transportService.getTaskManager(), executor));
}
@Override
protected String executor() {
return ThreadPool.Names.GENERIC;
}
@Override
protected PersistentTaskResponse newResponse() {
return new PersistentTaskResponse();
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
// Cluster is not affected but we update the persistent tasks custom metadata in the cluster state
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
@Override
protected final void masterOperation(final Request request, ClusterState state,
final ActionListener<PersistentTaskResponse> listener) {
persistentTasksClusterService.createPersistentTask(request.taskId, request.taskName, request.params,
new ActionListener<PersistentTask<?>>() {
@Override
public void onResponse(PersistentTask<?> task) {
listener.onResponse(new PersistentTaskResponse(task));
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
}
}
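// Illustrative sketch: building a request that passes the validate() rules above: task id and
// task name must be set, and the params' writeable name must equal the task name. "SampleParams"
// is a hypothetical PersistentTaskParams implementation assumed to return
// "cluster:admin/sample_task" from getWriteableName().
import org.elasticsearch.action.ActionRequestValidationException;
class StartRequestSketch {
    static StartPersistentTaskAction.Request buildRequest(SampleParams params) {
        StartPersistentTaskAction.Request request =
                new StartPersistentTaskAction.Request("sample-task-1", "cluster:admin/sample_task", params);
        ActionRequestValidationException validation = request.validate();
        assert validation == null : "id, name and params writeable name must line up";
        return request;
    }
}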

View File

@ -1,197 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
public class UpdatePersistentTaskStatusAction extends Action<UpdatePersistentTaskStatusAction.Request,
PersistentTaskResponse,
UpdatePersistentTaskStatusAction.RequestBuilder> {
public static final UpdatePersistentTaskStatusAction INSTANCE = new UpdatePersistentTaskStatusAction();
public static final String NAME = "cluster:admin/persistent/update_status";
private UpdatePersistentTaskStatusAction() {
super(NAME);
}
@Override
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new RequestBuilder(client, this);
}
@Override
public PersistentTaskResponse newResponse() {
return new PersistentTaskResponse();
}
public static class Request extends MasterNodeRequest<Request> {
private String taskId;
private long allocationId = -1L;
private Task.Status status;
public Request() {
}
public Request(String taskId, long allocationId, Task.Status status) {
this.taskId = taskId;
this.allocationId = allocationId;
this.status = status;
}
public void setTaskId(String taskId) {
this.taskId = taskId;
}
public void setAllocationId(long allocationId) {
this.allocationId = allocationId;
}
public void setStatus(Task.Status status) {
this.status = status;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
taskId = in.readString();
allocationId = in.readLong();
status = in.readOptionalNamedWriteable(Task.Status.class);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(taskId);
out.writeLong(allocationId);
out.writeOptionalNamedWriteable(status);
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
if (this.taskId == null) {
validationException = addValidationError("task id must be specified", validationException);
}
if (this.allocationId == -1L) {
validationException = addValidationError("allocationId must be specified", validationException);
}
// We cannot really check if status has the same type as task because we don't have access
// to the task here. We will check it when we try to update the task
return validationException;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Request request = (Request) o;
return Objects.equals(taskId, request.taskId) && allocationId == request.allocationId &&
Objects.equals(status, request.status);
}
@Override
public int hashCode() {
return Objects.hash(taskId, allocationId, status);
}
}
public static class RequestBuilder extends MasterNodeOperationRequestBuilder<UpdatePersistentTaskStatusAction.Request,
PersistentTaskResponse, UpdatePersistentTaskStatusAction.RequestBuilder> {
protected RequestBuilder(ElasticsearchClient client, UpdatePersistentTaskStatusAction action) {
super(client, action, new Request());
}
public final RequestBuilder setTaskId(String taskId) {
request.setTaskId(taskId);
return this;
}
public final RequestBuilder setStatus(Task.Status status) {
request.setStatus(status);
return this;
}
}
public static class TransportAction extends TransportMasterNodeAction<Request, PersistentTaskResponse> {
private final PersistentTasksClusterService persistentTasksClusterService;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
PersistentTasksClusterService persistentTasksClusterService,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, UpdatePersistentTaskStatusAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.persistentTasksClusterService = persistentTasksClusterService;
}
@Override
protected String executor() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
protected PersistentTaskResponse newResponse() {
return new PersistentTaskResponse();
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
// Cluster is not affected but we update the persistent tasks custom metadata in the cluster state
return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
}
@Override
protected final void masterOperation(final Request request, ClusterState state,
final ActionListener<PersistentTaskResponse> listener) {
persistentTasksClusterService.updatePersistentTaskStatus(request.taskId, request.allocationId, request.status,
new ActionListener<PersistentTask<?>>() {
@Override
public void onResponse(PersistentTask<?> task) {
listener.onResponse(new PersistentTaskResponse(task));
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
}
}

View File

@ -1,35 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
/**
* The Persistent Tasks Executors are responsible for executing restartable tasks that can survive the disappearance of the
* coordinating and executor nodes.
* <p>
* In order to be resilient to node restarts, the persistent tasks are using the cluster state instead of a transport service to send
* requests and responses. The execution is done in six phases:
* <p>
* 1. The coordinating node sends an ordinary transport request to the master node to start a new persistent task. This task is handled
* by the {@link org.elasticsearch.xpack.core.persistent.PersistentTasksService}, which uses the
* {@link org.elasticsearch.xpack.core.persistent.PersistentTasksClusterService} to update the cluster state with a record of the
* running persistent task.
* <p>
* 2. The master node updates the {@link org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData} in the cluster state to
* indicate that a new persistent task is running in the system.
* <p>
* 3. The {@link org.elasticsearch.xpack.core.persistent.PersistentTasksNodeService} running on every node in the cluster monitors changes
* in the cluster state and starts execution of all new tasks assigned to the node it is running on.
* <p>
* 4. If the task fails to start on the node, the {@link org.elasticsearch.xpack.core.persistent.PersistentTasksNodeService} uses the
* {@link org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData} to notify the
* {@link org.elasticsearch.xpack.core.persistent.PersistentTasksService}, which reassigns the action to another node in the cluster.
* <p>
* 5. If a task finishes successfully on the node and calls listener.onResponse(), the corresponding persistent action is removed from the
* cluster state unless the removeOnCompletion flag for this task is set to false.
* <p>
* 6. The {@link org.elasticsearch.xpack.core.persistent.RemovePersistentTaskAction} action can be also used to remove the persistent task.
*/
package org.elasticsearch.xpack.core.persistent;
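// Illustrative sketch: how a plugin hands its executors to the framework described above, via the
// PersistentTaskPlugin extension point that appears later in this diff. "SamplePlugin" and
// "SampleTasksExecutor" are the hypothetical names from the earlier sketches; the unqualified
// PersistentTasksExecutor reference assumes whichever persistent-tasks package this build targets,
// since this change moves the classes out of x-pack.
import java.util.Collections;
import java.util.List;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.PersistentTaskPlugin;
import org.elasticsearch.plugins.Plugin;
class SamplePlugin extends Plugin implements PersistentTaskPlugin {
    @Override
    public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(ClusterService clusterService) {
        // the node service picks these up and runs tasks assigned to the local node
        return Collections.<PersistentTasksExecutor<?>>singletonList(new SampleTasksExecutor(Settings.EMPTY));
    }
}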

View File

@ -45,6 +45,7 @@ import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.plugins.IngestPlugin;
import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.PersistentTaskPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.rest.RestController;
@ -56,7 +57,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.core.XPackPlugin;
import org.elasticsearch.persistent.PersistentTasksExecutor;
import org.elasticsearch.xpack.core.ssl.SSLService;
import java.nio.file.Path;
@ -74,8 +75,10 @@ import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import static java.util.stream.Collectors.toList;
public class LocalStateCompositeXPackPlugin extends XPackPlugin implements ScriptPlugin, ActionPlugin, IngestPlugin, NetworkPlugin,
ClusterPlugin, DiscoveryPlugin, MapperPlugin, AnalysisPlugin {
ClusterPlugin, DiscoveryPlugin, MapperPlugin, AnalysisPlugin, PersistentTaskPlugin {
private XPackLicenseState licenseState;
private SSLService sslService;
@ -379,6 +382,14 @@ public class LocalStateCompositeXPackPlugin extends XPackPlugin implements Scrip
}
}
@Override
public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(ClusterService clusterService) {
return filterPlugins(PersistentTaskPlugin.class).stream()
.map(p -> p.getPersistentTasksExecutor(clusterService))
.flatMap(List::stream)
.collect(toList());
}
private <T> List<T> filterPlugins(Class<T> type) {
return plugins.stream().filter(x -> type.isAssignableFrom(x.getClass())).map(p -> ((T)p))
.collect(Collectors.toList());

View File

@ -1,24 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.core.persistent.RemovePersistentTaskAction.Request;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
public class CancelPersistentTaskRequestTests extends AbstractStreamableTestCase<Request> {
@Override
protected Request createTestInstance() {
return new Request(randomAsciiOfLength(10));
}
@Override
protected Request createBlankInstance() {
return new Request();
}
}

View File

@ -1,439 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestParams;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.xpack.core.persistent.PersistentTasksExecutor.NO_NODE_FOUND;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
public class PersistentTasksClusterServiceTests extends ESTestCase {
public void testReassignmentRequired() {
int numberOfIterations = randomIntBetween(1, 30);
ClusterState clusterState = initialState();
for (int i = 0; i < numberOfIterations; i++) {
boolean significant = randomBoolean();
ClusterState previousState = clusterState;
logger.info("inter {} significant: {}", i, significant);
if (significant) {
clusterState = significantChange(clusterState);
} else {
clusterState = insignificantChange(clusterState);
}
ClusterChangedEvent event = new ClusterChangedEvent("test", clusterState, previousState);
assertThat(dumpEvent(event), PersistentTasksClusterService.reassignmentRequired(event,
new PersistentTasksClusterService.ExecutorNodeDecider() {
@Override
public <Params extends PersistentTaskParams> Assignment getAssignment(
String action, ClusterState currentState, Params params) {
if ("never_assign".equals(((TestParams) params).getTestParam())) {
return NO_NODE_FOUND;
}
return randomNodeAssignment(currentState.nodes());
}
}), equalTo(significant));
}
}
public void testReassignTasksWithNoTasks() {
ClusterState clusterState = initialState();
assertThat(reassign(clusterState).metaData().custom(PersistentTasksCustomMetaData.TYPE), nullValue());
}
public void testReassignConsidersClusterStateUpdates() {
ClusterState clusterState = initialState();
ClusterState.Builder builder = ClusterState.builder(clusterState);
PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder(
clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE));
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
addTestNodes(nodes, randomIntBetween(1, 10));
int numberOfTasks = randomIntBetween(2, 40);
for (int i = 0; i < numberOfTasks; i++) {
addTask(tasks, "assign_one", randomBoolean() ? null : "no_longer_exits");
}
MetaData.Builder metaData = MetaData.builder(clusterState.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build());
clusterState = builder.metaData(metaData).nodes(nodes).build();
ClusterState newClusterState = reassign(clusterState);
PersistentTasksCustomMetaData tasksInProgress = newClusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
assertThat(tasksInProgress, notNullValue());
}
public void testReassignTasks() {
ClusterState clusterState = initialState();
ClusterState.Builder builder = ClusterState.builder(clusterState);
PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder(
clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE));
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
addTestNodes(nodes, randomIntBetween(1, 10));
int numberOfTasks = randomIntBetween(0, 40);
for (int i = 0; i < numberOfTasks; i++) {
switch (randomInt(2)) {
case 0:
// add a task that should get assigned because it is currently unassigned or assigned to a non-existing node
addTask(tasks, "assign_me", randomBoolean() ? null : "no_longer_exits");
break;
case 1:
// add a task assigned to non-existing node that should not get assigned
addTask(tasks, "dont_assign_me", randomBoolean() ? null : "no_longer_exits");
break;
case 2:
addTask(tasks, "assign_one", randomBoolean() ? null : "no_longer_exits");
break;
}
}
MetaData.Builder metaData = MetaData.builder(clusterState.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build());
clusterState = builder.metaData(metaData).nodes(nodes).build();
ClusterState newClusterState = reassign(clusterState);
PersistentTasksCustomMetaData tasksInProgress = newClusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
assertThat(tasksInProgress, notNullValue());
assertThat("number of tasks shouldn't change as a result or reassignment",
numberOfTasks, equalTo(tasksInProgress.tasks().size()));
int assignOneCount = 0;
for (PersistentTask<?> task : tasksInProgress.tasks()) {
// explanation should correspond to the action name
switch (((TestParams) task.getParams()).getTestParam()) {
case "assign_me":
assertThat(task.getExecutorNode(), notNullValue());
assertThat(task.isAssigned(), equalTo(true));
if (clusterState.nodes().nodeExists(task.getExecutorNode()) == false) {
logger.info(clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE).toString());
}
assertThat("task should be assigned to a node that is in the cluster, was assigned to " + task.getExecutorNode(),
clusterState.nodes().nodeExists(task.getExecutorNode()), equalTo(true));
assertThat(task.getAssignment().getExplanation(), equalTo("test assignment"));
break;
case "dont_assign_me":
assertThat(task.getExecutorNode(), nullValue());
assertThat(task.isAssigned(), equalTo(false));
assertThat(task.getAssignment().getExplanation(), equalTo("no appropriate nodes found for the assignment"));
break;
case "assign_one":
if (task.isAssigned()) {
assignOneCount++;
assertThat("more than one assign_one tasks are assigned", assignOneCount, lessThanOrEqualTo(1));
assertThat(task.getAssignment().getExplanation(), equalTo("test assignment"));
} else {
assertThat(task.getAssignment().getExplanation(), equalTo("only one task can be assigned at a time"));
}
break;
default:
fail("Unknown action " + task.getTaskName());
}
}
}
private void addTestNodes(DiscoveryNodes.Builder nodes, int nonLocalNodesCount) {
for (int i = 0; i < nonLocalNodesCount; i++) {
nodes.add(new DiscoveryNode("other_node_" + i, buildNewFakeTransportAddress(), Version.CURRENT));
}
}
private ClusterState reassign(ClusterState clusterState) {
return PersistentTasksClusterService.reassignTasks(clusterState, logger,
new PersistentTasksClusterService.ExecutorNodeDecider() {
@Override
public <Params extends PersistentTaskParams> Assignment getAssignment(
String action, ClusterState currentState, Params params) {
TestParams testParams = (TestParams) params;
switch (testParams.getTestParam()) {
case "assign_me":
return randomNodeAssignment(currentState.nodes());
case "dont_assign_me":
return NO_NODE_FOUND;
case "fail_me_if_called":
fail("the decision decider shouldn't be called on this task");
return null;
case "assign_one":
return assignOnlyOneTaskAtATime(currentState);
default:
fail("unknown param " + testParams.getTestParam());
}
return NO_NODE_FOUND;
}
});
}
private Assignment assignOnlyOneTaskAtATime(ClusterState clusterState) {
DiscoveryNodes nodes = clusterState.nodes();
PersistentTasksCustomMetaData tasksInProgress = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
if (tasksInProgress.findTasks(TestPersistentTasksExecutor.NAME, task ->
"assign_one".equals(((TestParams) task.getParams()).getTestParam()) &&
nodes.nodeExists(task.getExecutorNode())).isEmpty()) {
return randomNodeAssignment(clusterState.nodes());
} else {
return new Assignment(null, "only one task can be assigned at a time");
}
}
private Assignment randomNodeAssignment(DiscoveryNodes nodes) {
if (nodes.getNodes().isEmpty()) {
return NO_NODE_FOUND;
}
List<String> nodeList = new ArrayList<>();
for (ObjectCursor<String> node : nodes.getNodes().keys()) {
nodeList.add(node.value);
}
String node = randomFrom(nodeList);
if (node != null) {
return new Assignment(node, "test assignment");
} else {
return NO_NODE_FOUND;
}
}
private String dumpEvent(ClusterChangedEvent event) {
return "nodes_changed: " + event.nodesChanged() +
" nodes_removed:" + event.nodesRemoved() +
" routing_table_changed:" + event.routingTableChanged() +
" tasks: " + event.state().metaData().custom(PersistentTasksCustomMetaData.TYPE);
}
private ClusterState significantChange(ClusterState clusterState) {
ClusterState.Builder builder = ClusterState.builder(clusterState);
PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
if (tasks != null) {
if (randomBoolean()) {
for (PersistentTask<?> task : tasks.tasks()) {
if (task.isAssigned() && clusterState.nodes().nodeExists(task.getExecutorNode())) {
logger.info("removed node {}", task.getExecutorNode());
builder.nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(task.getExecutorNode()));
return builder.build();
}
}
}
}
boolean tasksOrNodesChanged = false;
// add a new unassigned task
if (hasAssignableTasks(tasks, clusterState.nodes()) == false) {
// we don't have any unassigned tasks - add some
if (randomBoolean()) {
logger.info("added random task");
addRandomTask(builder, MetaData.builder(clusterState.metaData()), PersistentTasksCustomMetaData.builder(tasks), null);
tasksOrNodesChanged = true;
} else {
logger.info("added unassignable task with custom assignment message");
addRandomTask(builder, MetaData.builder(clusterState.metaData()), PersistentTasksCustomMetaData.builder(tasks),
new Assignment(null, "change me"), "never_assign");
tasksOrNodesChanged = true;
}
}
// add a node if there are unassigned tasks
if (clusterState.nodes().getNodes().isEmpty()) {
logger.info("added random node");
builder.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode(randomAlphaOfLength(10))));
tasksOrNodesChanged = true;
}
if (tasksOrNodesChanged == false) {
// change routing table to simulate a change
logger.info("changed routing table");
MetaData.Builder metaData = MetaData.builder(clusterState.metaData());
RoutingTable.Builder routingTable = RoutingTable.builder(clusterState.routingTable());
changeRoutingTable(metaData, routingTable);
builder.metaData(metaData).routingTable(routingTable.build());
}
return builder.build();
}
private PersistentTasksCustomMetaData removeTasksWithChangingAssignment(PersistentTasksCustomMetaData tasks) {
if (tasks != null) {
boolean changed = false;
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(tasks);
for (PersistentTask<?> task : tasks.tasks()) {
// Remove all unassigned tasks with changing assignments, as they might trigger a significant change
if ("never_assign".equals(((TestParams) task.getParams()).getTestParam()) &&
"change me".equals(task.getAssignment().getExplanation())) {
logger.info("removed task with changing assignment {}", task.getId());
tasksBuilder.removeTask(task.getId());
changed = true;
}
}
if (changed) {
return tasksBuilder.build();
}
}
return tasks;
}
private ClusterState insignificantChange(ClusterState clusterState) {
ClusterState.Builder builder = ClusterState.builder(clusterState);
PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
tasks = removeTasksWithChangingAssignment(tasks);
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder(tasks);
if (randomBoolean()) {
if (hasAssignableTasks(tasks, clusterState.nodes()) == false) {
// we don't have any unassigned tasks - adding a node or changing a routing table shouldn't affect anything
if (randomBoolean()) {
logger.info("added random node");
builder.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode(randomAlphaOfLength(10))));
}
if (randomBoolean()) {
logger.info("added random unassignable task");
addRandomTask(builder, MetaData.builder(clusterState.metaData()), tasksBuilder, NO_NODE_FOUND, "never_assign");
return builder.build();
}
logger.info("changed routing table");
MetaData.Builder metaData = MetaData.builder(clusterState.metaData());
metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build());
RoutingTable.Builder routingTable = RoutingTable.builder(clusterState.routingTable());
changeRoutingTable(metaData, routingTable);
builder.metaData(metaData).routingTable(routingTable.build());
return builder.build();
}
}
if (randomBoolean()) {
// remove a node that doesn't have any tasks assigned to it and isn't the master node
for (DiscoveryNode node : clusterState.nodes()) {
if (hasTasksAssignedTo(tasks, node.getId()) == false && "this_node".equals(node.getId()) == false) {
logger.info("removed unassigned node {}", node.getId());
return builder.nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(node.getId())).build();
}
}
}
if (randomBoolean()) {
// clear the task
if (randomBoolean()) {
logger.info("removed all tasks");
MetaData.Builder metaData = MetaData.builder(clusterState.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE,
PersistentTasksCustomMetaData.builder().build());
return builder.metaData(metaData).build();
} else {
logger.info("set task custom to null");
MetaData.Builder metaData = MetaData.builder(clusterState.metaData()).removeCustom(PersistentTasksCustomMetaData.TYPE);
return builder.metaData(metaData).build();
}
}
logger.info("removed all unassigned tasks and changed routing table");
if (tasks != null) {
for (PersistentTask<?> task : tasks.tasks()) {
if (task.getExecutorNode() == null || "never_assign".equals(((TestParams) task.getParams()).getTestParam())) {
tasksBuilder.removeTask(task.getId());
}
}
}
// Just add a random index - that shouldn't change anything
IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10))
.settings(Settings.builder().put("index.version.created", VersionUtils.randomVersion(random())))
.numberOfShards(1)
.numberOfReplicas(1)
.build();
MetaData.Builder metaData = MetaData.builder(clusterState.metaData()).put(indexMetaData, false)
.putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build());
return builder.metaData(metaData).build();
}
private boolean hasAssignableTasks(PersistentTasksCustomMetaData tasks, DiscoveryNodes discoveryNodes) {
if (tasks == null || tasks.tasks().isEmpty()) {
return false;
}
return tasks.tasks().stream().anyMatch(task -> {
if (task.getExecutorNode() == null || discoveryNodes.nodeExists(task.getExecutorNode())) {
return "never_assign".equals(((TestParams) task.getParams()).getTestParam()) == false;
}
return false;
});
}
private boolean hasTasksAssignedTo(PersistentTasksCustomMetaData tasks, String nodeId) {
return tasks != null && tasks.tasks().stream().anyMatch(
task -> nodeId.equals(task.getExecutorNode()));
}
private ClusterState.Builder addRandomTask(ClusterState.Builder clusterStateBuilder,
MetaData.Builder metaData, PersistentTasksCustomMetaData.Builder tasks,
String node) {
return addRandomTask(clusterStateBuilder, metaData, tasks, new Assignment(node, randomAlphaOfLength(10)),
randomAlphaOfLength(10));
}
private ClusterState.Builder addRandomTask(ClusterState.Builder clusterStateBuilder,
MetaData.Builder metaData, PersistentTasksCustomMetaData.Builder tasks,
Assignment assignment, String param) {
return clusterStateBuilder.metaData(metaData.putCustom(PersistentTasksCustomMetaData.TYPE,
tasks.addTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams(param), assignment).build()));
}
private void addTask(PersistentTasksCustomMetaData.Builder tasks, String param, String node) {
tasks.addTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams(param),
new Assignment(node, "explanation: " + param));
}
private DiscoveryNode newNode(String nodeId) {
return new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), emptyMap(),
Collections.unmodifiableSet(new HashSet<>(Arrays.asList(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA))),
Version.CURRENT);
}
private ClusterState initialState() {
MetaData.Builder metaData = MetaData.builder();
RoutingTable.Builder routingTable = RoutingTable.builder();
int randomIndices = randomIntBetween(0, 5);
for (int i = 0; i < randomIndices; i++) {
changeRoutingTable(metaData, routingTable);
}
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "this_node"));
nodes.localNodeId("this_node");
nodes.masterNodeId("this_node");
return ClusterState.builder(ClusterName.DEFAULT)
.metaData(metaData)
.routingTable(routingTable.build())
.build();
}
private void changeRoutingTable(MetaData.Builder metaData, RoutingTable.Builder routingTable) {
IndexMetaData indexMetaData = IndexMetaData.builder(randomAlphaOfLength(10))
.settings(Settings.builder().put("index.version.created", VersionUtils.randomVersion(random())))
.numberOfShards(1)
.numberOfReplicas(1)
.build();
metaData.put(indexMetaData, false);
routingTable.addAsNew(indexMetaData);
}
}

View File

@ -1,249 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaData.Custom;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.AbstractDiffableSerializationTestCase;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Builder;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.Status;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestParams;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import static org.elasticsearch.cluster.metadata.MetaData.CONTEXT_MODE_GATEWAY;
import static org.elasticsearch.cluster.metadata.MetaData.CONTEXT_MODE_SNAPSHOT;
import static org.elasticsearch.xpack.core.persistent.PersistentTasksExecutor.NO_NODE_FOUND;
public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializationTestCase<Custom> {
@Override
protected PersistentTasksCustomMetaData createTestInstance() {
int numberOfTasks = randomInt(10);
PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder();
for (int i = 0; i < numberOfTasks; i++) {
String taskId = UUIDs.base64UUID();
tasks.addTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams(randomAlphaOfLength(10)),
randomAssignment());
if (randomBoolean()) {
// From time to time update status
tasks.updateTaskStatus(taskId, new Status(randomAlphaOfLength(10)));
}
}
return tasks.build();
}
@Override
protected Writeable.Reader<Custom> instanceReader() {
return PersistentTasksCustomMetaData::new;
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(Arrays.asList(
new Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData::new),
new Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData::readDiffFrom),
new Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new),
new Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new)
));
}
@Override
protected Custom makeTestChanges(Custom testInstance) {
Builder builder = PersistentTasksCustomMetaData.builder((PersistentTasksCustomMetaData) testInstance);
switch (randomInt(3)) {
case 0:
addRandomTask(builder);
break;
case 1:
if (builder.getCurrentTaskIds().isEmpty()) {
addRandomTask(builder);
} else {
builder.reassignTask(pickRandomTask(builder), randomAssignment());
}
break;
case 2:
if (builder.getCurrentTaskIds().isEmpty()) {
addRandomTask(builder);
} else {
builder.updateTaskStatus(pickRandomTask(builder), randomBoolean() ? new Status(randomAlphaOfLength(10)) : null);
}
break;
case 3:
if (builder.getCurrentTaskIds().isEmpty()) {
addRandomTask(builder);
} else {
builder.removeTask(pickRandomTask(builder));
}
break;
}
return builder.build();
}
@Override
protected Writeable.Reader<Diff<Custom>> diffReader() {
return PersistentTasksCustomMetaData::readDiffFrom;
}
@Override
protected PersistentTasksCustomMetaData doParseInstance(XContentParser parser) throws IOException {
return PersistentTasksCustomMetaData.fromXContent(parser);
}
/*
@Override
protected XContentBuilder toXContent(Custom instance, XContentType contentType) throws IOException {
return toXContent(instance, contentType, new ToXContent.MapParams(
Collections.singletonMap(MetaData.CONTEXT_MODE_PARAM, MetaData.XContentContext.API.toString())));
}
*/
private String addRandomTask(Builder builder) {
String taskId = UUIDs.base64UUID();
builder.addTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams(randomAlphaOfLength(10)), randomAssignment());
return taskId;
}
private String pickRandomTask(PersistentTasksCustomMetaData.Builder testInstance) {
return randomFrom(new ArrayList<>(testInstance.getCurrentTaskIds()));
}
@Override
protected NamedXContentRegistry xContentRegistry() {
return new NamedXContentRegistry(Arrays.asList(
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TestPersistentTasksExecutor.NAME),
TestParams::fromXContent),
new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TestPersistentTasksExecutor.NAME), Status::fromXContent)
));
}
@SuppressWarnings("unchecked")
public void testSerializationContext() throws Exception {
PersistentTasksCustomMetaData testInstance = createTestInstance();
for (int i = 0; i < randomInt(10); i++) {
testInstance = (PersistentTasksCustomMetaData) makeTestChanges(testInstance);
}
ToXContent.MapParams params = new ToXContent.MapParams(
Collections.singletonMap(MetaData.CONTEXT_MODE_PARAM, randomFrom(CONTEXT_MODE_SNAPSHOT, CONTEXT_MODE_GATEWAY)));
XContentType xContentType = randomFrom(XContentType.values());
BytesReference shuffled = toShuffledXContent(testInstance, xContentType, params, false);
XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled);
PersistentTasksCustomMetaData newInstance = doParseInstance(parser);
assertNotSame(newInstance, testInstance);
assertEquals(testInstance.tasks().size(), newInstance.tasks().size());
for (PersistentTask<?> testTask : testInstance.tasks()) {
PersistentTask<TestParams> newTask = (PersistentTask<TestParams>) newInstance.getTask(testTask.getId());
assertNotNull(newTask);
// Things that should be serialized
assertEquals(testTask.getTaskName(), newTask.getTaskName());
assertEquals(testTask.getId(), newTask.getId());
assertEquals(testTask.getStatus(), newTask.getStatus());
assertEquals(testTask.getParams(), newTask.getParams());
// Things that shouldn't be serialized
assertEquals(0, newTask.getAllocationId());
assertNull(newTask.getExecutorNode());
}
}
public void testBuilder() {
PersistentTasksCustomMetaData persistentTasks = null;
String lastKnownTask = "";
for (int i = 0; i < randomIntBetween(10, 100); i++) {
final Builder builder;
if (randomBoolean()) {
builder = PersistentTasksCustomMetaData.builder();
} else {
builder = PersistentTasksCustomMetaData.builder(persistentTasks);
}
boolean changed = false;
for (int j = 0; j < randomIntBetween(1, 10); j++) {
switch (randomInt(4)) {
case 0:
lastKnownTask = addRandomTask(builder);
changed = true;
break;
case 1:
if (builder.hasTask(lastKnownTask)) {
changed = true;
builder.reassignTask(lastKnownTask, randomAssignment());
} else {
String fLastKnownTask = lastKnownTask;
expectThrows(ResourceNotFoundException.class, () -> builder.reassignTask(fLastKnownTask, randomAssignment()));
}
break;
case 2:
if (builder.hasTask(lastKnownTask)) {
changed = true;
builder.updateTaskStatus(lastKnownTask, randomBoolean() ? new Status(randomAlphaOfLength(10)) : null);
} else {
String fLastKnownTask = lastKnownTask;
expectThrows(ResourceNotFoundException.class, () -> builder.updateTaskStatus(fLastKnownTask, null));
}
break;
case 3:
if (builder.hasTask(lastKnownTask)) {
changed = true;
builder.removeTask(lastKnownTask);
} else {
String fLastKnownTask = lastKnownTask;
expectThrows(ResourceNotFoundException.class, () -> builder.removeTask(fLastKnownTask));
}
break;
case 4:
if (builder.hasTask(lastKnownTask)) {
changed = true;
builder.finishTask(lastKnownTask);
} else {
String fLastKnownTask = lastKnownTask;
expectThrows(ResourceNotFoundException.class, () -> builder.finishTask(fLastKnownTask));
}
break;
}
}
assertEquals(changed, builder.isChanged());
persistentTasks = builder.build();
}
}
private Assignment randomAssignment() {
if (randomBoolean()) {
if (randomBoolean()) {
return NO_NODE_FOUND;
} else {
return new Assignment(null, randomAlphaOfLength(10));
}
}
return new Assignment(randomAlphaOfLength(10), randomAlphaOfLength(10));
}
}

View File

@ -1,103 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestParams;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 1)
public class PersistentTasksExecutorFullRestartIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singletonList(TestPersistentTasksPlugin.class);
}
@Override
protected Collection<Class<? extends Plugin>> transportClientPlugins() {
return nodePlugins();
}
protected boolean ignoreExternalCluster() {
return true;
}
@TestLogging("org.elasticsearch.xpack.persistent:TRACE,org.elasticsearch.cluster.service:DEBUG")
public void testFullClusterRestart() throws Exception {
PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class);
int numberOfTasks = randomIntBetween(1, 10);
String[] taskIds = new String[numberOfTasks];
List<PlainActionFuture<PersistentTask<TestParams>>> futures = new ArrayList<>(numberOfTasks);
for (int i = 0; i < numberOfTasks; i++) {
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
futures.add(future);
taskIds[i] = UUIDs.base64UUID();
service.startPersistentTask(taskIds[i], TestPersistentTasksExecutor.NAME, randomBoolean() ? null : new TestParams("Blah"),
future);
}
for (int i = 0; i < numberOfTasks; i++) {
assertThat(futures.get(i).get().getId(), equalTo(taskIds[i]));
}
PersistentTasksCustomMetaData tasksInProgress = internalCluster().clusterService().state().getMetaData()
.custom(PersistentTasksCustomMetaData.TYPE);
assertThat(tasksInProgress.tasks().size(), equalTo(numberOfTasks));
// Make sure that at least one of the tasks is running
assertBusy(() -> {
// Wait for the task to start
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get()
.getTasks().size(), greaterThan(0));
});
// Restart cluster
internalCluster().fullRestart();
ensureYellow();
tasksInProgress = internalCluster().clusterService().state().getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
assertThat(tasksInProgress.tasks().size(), equalTo(numberOfTasks));
// Check that cluster state is correct
for (int i = 0; i < numberOfTasks; i++) {
PersistentTask<?> task = tasksInProgress.getTask(taskIds[i]);
assertNotNull(task);
}
logger.info("Waiting for {} tasks to start", numberOfTasks);
assertBusy(() -> {
// Wait for all tasks to start
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get()
.getTasks().size(), equalTo(numberOfTasks));
});
logger.info("Complete all tasks");
// Complete the running task and make sure it finishes properly
assertThat(new TestPersistentTasksPlugin.TestTasksRequestBuilder(client()).setOperation("finish").get().getTasks().size(),
equalTo(numberOfTasks));
assertBusy(() -> {
// Make sure the task is removed from the cluster state
assertThat(((PersistentTasksCustomMetaData) internalCluster().clusterService().state().getMetaData()
.custom(PersistentTasksCustomMetaData.TYPE)).tasks(), empty());
});
}
}

View File

@ -1,284 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService.WaitForPersistentTaskStatusListener;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.Status;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestParams;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestTasksRequestBuilder;
import org.junit.After;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, minNumDataNodes = 2)
public class PersistentTasksExecutorIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singletonList(TestPersistentTasksPlugin.class);
}
@Override
protected Collection<Class<? extends Plugin>> transportClientPlugins() {
return nodePlugins();
}
protected boolean ignoreExternalCluster() {
return true;
}
@After
public void cleanup() throws Exception {
assertNoRunningTasks();
}
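// A future that also acts as the status-wait listener, so tests can block until waitForPersistentTaskStatus completes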
public static class WaitForPersistentTaskStatusFuture<Params extends PersistentTaskParams>
extends PlainActionFuture<PersistentTask<Params>>
implements WaitForPersistentTaskStatusListener<Params> {
}
public void testPersistentActionFailure() throws Exception {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
persistentTasksService.startPersistentTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
long allocationId = future.get().getAllocationId();
assertBusy(() -> {
// Wait for the task to start
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get()
.getTasks().size(), equalTo(1));
});
TaskInfo firstRunningTask = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
.get().getTasks().get(0);
logger.info("Found running task with id {} and parent {}", firstRunningTask.getId(), firstRunningTask.getParentTaskId());
// Verifying parent
assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId));
assertThat(firstRunningTask.getParentTaskId().getNodeId(), equalTo("cluster"));
logger.info("Failing the running task");
// Fail the running task and make sure it disappears from the task list
assertThat(new TestTasksRequestBuilder(client()).setOperation("fail").setTaskId(firstRunningTask.getTaskId())
.get().getTasks().size(), equalTo(1));
logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId());
assertBusy(() -> {
// Wait for the task to disappear completely
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
empty());
});
}
public void testPersistentActionCompletion() throws Exception {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
String taskId = UUIDs.base64UUID();
persistentTasksService.startPersistentTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
long allocationId = future.get().getAllocationId();
assertBusy(() -> {
// Wait for the task to start
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get()
.getTasks().size(), equalTo(1));
});
TaskInfo firstRunningTask = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
.setDetailed(true).get().getTasks().get(0);
logger.info("Found running task with id {} and parent {}", firstRunningTask.getId(), firstRunningTask.getParentTaskId());
// Verifying parent and description
assertThat(firstRunningTask.getParentTaskId().getId(), equalTo(allocationId));
assertThat(firstRunningTask.getParentTaskId().getNodeId(), equalTo("cluster"));
assertThat(firstRunningTask.getDescription(), equalTo("id=" + taskId));
if (randomBoolean()) {
logger.info("Simulating errant completion notification");
// try sending a completion notification with an incorrect allocation id
PlainActionFuture<PersistentTask<?>> failedCompletionNotificationFuture = new PlainActionFuture<>();
persistentTasksService.sendCompletionNotification(taskId, Long.MAX_VALUE, null, failedCompletionNotificationFuture);
assertThrows(failedCompletionNotificationFuture, ResourceNotFoundException.class);
// Make sure that the task is still running
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
.setDetailed(true).get().getTasks().size(), equalTo(1));
}
stopOrCancelTask(firstRunningTask.getTaskId());
}
public void testPersistentActionWithNoAvailableNode() throws Exception {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
TestParams testParams = new TestParams("Blah");
testParams.setExecutorNodeAttr("test");
persistentTasksService.startPersistentTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, future);
String taskId = future.get().getId();
Settings nodeSettings = Settings.builder().put(nodeSettings(0)).put("node.attr.test_attr", "test").build();
String newNode = internalCluster().startNode(nodeSettings);
String newNodeId = internalCluster().clusterService(newNode).localNode().getId();
assertBusy(() -> {
// Wait for the task to start
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks()
.size(), equalTo(1));
});
TaskInfo taskInfo = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
.get().getTasks().get(0);
// Verify that the task runs on the new node
assertThat(taskInfo.getTaskId().getNodeId(), equalTo(newNodeId));
internalCluster().stopRandomNode(settings -> "test".equals(settings.get("node.attr.test_attr")));
assertBusy(() -> {
// Wait for the task to disappear completely
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
empty());
});
// Remove the persistent task
PlainActionFuture<PersistentTask<?>> removeFuture = new PlainActionFuture<>();
persistentTasksService.cancelPersistentTask(taskId, removeFuture);
assertEquals(removeFuture.get().getId(), taskId);
}
public void testPersistentActionStatusUpdate() throws Exception {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
persistentTasksService.startPersistentTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
String taskId = future.get().getId();
assertBusy(() -> {
// Wait for the task to start
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks()
.size(), equalTo(1));
});
TaskInfo firstRunningTask = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
.get().getTasks().get(0);
PersistentTasksCustomMetaData tasksInProgress = internalCluster().clusterService().state().getMetaData()
.custom(PersistentTasksCustomMetaData.TYPE);
assertThat(tasksInProgress.tasks().size(), equalTo(1));
assertThat(tasksInProgress.tasks().iterator().next().getStatus(), nullValue());
int numberOfUpdates = randomIntBetween(1, 10);
for (int i = 0; i < numberOfUpdates; i++) {
logger.info("Updating the task status");
// Complete the running task and make sure it finishes properly
assertThat(new TestTasksRequestBuilder(client()).setOperation("update_status").setTaskId(firstRunningTask.getTaskId())
.get().getTasks().size(), equalTo(1));
int finalI = i;
WaitForPersistentTaskStatusFuture<?> future1 = new WaitForPersistentTaskStatusFuture<>();
persistentTasksService.waitForPersistentTaskStatus(taskId,
task -> task != null && task.getStatus() != null && task.getStatus().toString() != null &&
task.getStatus().toString().equals("{\"phase\":\"phase " + (finalI + 1) + "\"}"),
TimeValue.timeValueSeconds(10), future1);
assertThat(future1.get().getId(), equalTo(taskId));
}
WaitForPersistentTaskStatusFuture<?> future1 = new WaitForPersistentTaskStatusFuture<>();
persistentTasksService.waitForPersistentTaskStatus(taskId,
task -> false, TimeValue.timeValueMillis(10), future1);
assertThrows(future1, IllegalStateException.class, "timed out after 10ms");
PlainActionFuture<PersistentTask<?>> failedUpdateFuture = new PlainActionFuture<>();
persistentTasksService.updateStatus(taskId, -2, new Status("should fail"), failedUpdateFuture);
assertThrows(failedUpdateFuture, ResourceNotFoundException.class, "the task with id " + taskId +
" and allocation id -2 doesn't exist");
// Wait for the task to disappear
WaitForPersistentTaskStatusFuture<?> future2 = new WaitForPersistentTaskStatusFuture<>();
persistentTasksService.waitForPersistentTaskStatus(taskId, Objects::isNull, TimeValue.timeValueSeconds(10), future2);
logger.info("Completing the running task");
// Complete the running task and make sure it finishes properly
assertThat(new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(firstRunningTask.getTaskId())
.get().getTasks().size(), equalTo(1));
assertThat(future2.get(), nullValue());
}
public void testCreatePersistentTaskWithDuplicateId() throws Exception {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
String taskId = UUIDs.base64UUID();
persistentTasksService.startPersistentTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
future.get();
PlainActionFuture<PersistentTask<TestParams>> future2 = new PlainActionFuture<>();
persistentTasksService.startPersistentTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future2);
assertThrows(future2, ResourceAlreadyExistsException.class);
assertBusy(() -> {
// Wait for the task to start
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get()
.getTasks().size(), equalTo(1));
});
TaskInfo firstRunningTask = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
.get().getTasks().get(0);
logger.info("Completing the running task");
// Complete the running task and make sure it finishes properly
assertThat(new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(firstRunningTask.getTaskId())
.get().getTasks().size(), equalTo(1));
logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId());
assertBusy(() -> {
// Wait for the task to disappear completely
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
empty());
});
}
private void stopOrCancelTask(TaskId taskId) {
if (randomBoolean()) {
logger.info("Completing the running task");
// Complete the running task and make sure it finishes properly
assertThat(new TestTasksRequestBuilder(client()).setOperation("finish").setTaskId(taskId)
.get().getTasks().size(), equalTo(1));
} else {
logger.info("Cancelling the running task");
// Cancel the running task and make sure it finishes properly
assertThat(client().admin().cluster().prepareCancelTasks().setTaskId(taskId)
.get().getTasks().size(), equalTo(1));
}
}
private void assertNoRunningTasks() throws Exception {
assertBusy(() -> {
// Wait for the task to finish
List<TaskInfo> tasks = client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get()
.getTasks();
logger.info("Found {} tasks", tasks.size());
assertThat(tasks.size(), equalTo(0));
// Make sure the task is removed from the cluster state
assertThat(((PersistentTasksCustomMetaData) internalCluster().clusterService().state().getMetaData()
.custom(PersistentTasksCustomMetaData.TYPE)).tasks(), empty());
});
}
}

View File

@ -1,42 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import java.util.Collections;
public class PersistentTasksExecutorResponseTests extends AbstractStreamableTestCase<PersistentTaskResponse> {
@Override
protected PersistentTaskResponse createTestInstance() {
if (randomBoolean()) {
return new PersistentTaskResponse(
new PersistentTask<PersistentTaskParams>(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME,
new TestPersistentTasksPlugin.TestParams("test"),
randomLong(), PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT));
} else {
return new PersistentTaskResponse(null);
}
}
@Override
protected PersistentTaskResponse createBlankInstance() {
return new PersistentTaskResponse();
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(Collections.singletonList(
new NamedWriteableRegistry.Entry(PersistentTaskParams.class,
TestPersistentTasksExecutor.NAME, TestPersistentTasksPlugin.TestParams::new)
));
}
}

View File

@ -1,29 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.xpack.core.persistent.PersistentTasksNodeService.Status;
import static org.hamcrest.Matchers.containsString;
public class PersistentTasksNodeServiceStatusTests extends AbstractWireSerializingTestCase<Status> {
@Override
protected Status createTestInstance() {
return new Status(randomFrom(AllocatedPersistentTask.State.values()));
}
@Override
protected Writeable.Reader<Status> instanceReader() {
return Status::new;
}
public void testToString() {
assertThat(createTestInstance().toString(), containsString("state"));
}
}

View File

@ -1,357 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestParams;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.sameInstance;
import static org.hamcrest.core.IsEqual.equalTo;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class PersistentTasksNodeServiceTests extends ESTestCase {
private ThreadPool threadPool;
@Override
@Before
public void setUp() throws Exception {
super.setUp();
threadPool = new TestThreadPool(getClass().getName());
}
@Override
@After
public void tearDown() throws Exception {
terminate(threadPool);
super.tearDown();
}
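// Creates a cluster state with the local node ("this_node") plus the requested number of additional nodes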
private ClusterState createInitialClusterState(int nonLocalNodesCount, Settings settings) {
ClusterState.Builder state = ClusterState.builder(new ClusterName("PersistentActionExecutorTests"));
state.metaData(MetaData.builder().generateClusterUuidIfNeeded());
state.routingTable(RoutingTable.builder().build());
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
nodes.add(DiscoveryNode.createLocal(settings, buildNewFakeTransportAddress(), "this_node"));
for (int i = 0; i < nonLocalNodesCount; i++) {
nodes.add(new DiscoveryNode("other_node_" + i, buildNewFakeTransportAddress(), Version.CURRENT));
}
nodes.localNodeId("this_node");
state.nodes(nodes);
return state.build();
}
public void testStartTask() throws Exception {
PersistentTasksService persistentTasksService = mock(PersistentTasksService.class);
@SuppressWarnings("unchecked") PersistentTasksExecutor<TestParams> action = mock(PersistentTasksExecutor.class);
when(action.getExecutor()).thenReturn(ThreadPool.Names.SAME);
when(action.getTaskName()).thenReturn(TestPersistentTasksExecutor.NAME);
int nonLocalNodesCount = randomInt(10);
// need to account for 5 original tasks on each node and their relocations
for (int i = 0; i < (nonLocalNodesCount + 1) * 10; i++) {
TaskId parentId = new TaskId("cluster", i);
when(action.createTask(anyLong(), anyString(), anyString(), eq(parentId), any(), any())).thenReturn(
new TestPersistentTasksPlugin.TestTask(i, "persistent", "test", "", parentId, Collections.emptyMap()));
}
PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action));
MockExecutor executor = new MockExecutor();
PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService,
registry, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), executor);
ClusterState state = createInitialClusterState(nonLocalNodesCount, Settings.EMPTY);
PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder();
boolean added = false;
if (nonLocalNodesCount > 0) {
for (int i = 0; i < randomInt(5); i++) {
tasks.addTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("other_" + i),
new Assignment("other_node_" + randomInt(nonLocalNodesCount), "test assignment on other node"));
if (added == false && randomBoolean()) {
added = true;
tasks.addTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("this_param"),
new Assignment("this_node", "test assignment on this node"));
}
}
}
if (added == false) {
logger.info("No local node action was added");
}
MetaData.Builder metaData = MetaData.builder(state.metaData());
metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build());
ClusterState newClusterState = ClusterState.builder(state).metaData(metaData).build();
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
if (added) {
// Action for this node was added, let's make sure it was invoked
assertThat(executor.executions.size(), equalTo(1));
// Add task on some other node
state = newClusterState;
newClusterState = addTask(state, TestPersistentTasksExecutor.NAME, null, "some_other_node");
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
// Make sure action wasn't called again
assertThat(executor.executions.size(), equalTo(1));
// Start another task on this node
state = newClusterState;
newClusterState = addTask(state, TestPersistentTasksExecutor.NAME, new TestParams("this_param"), "this_node");
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
// Make sure action was called this time
assertThat(executor.size(), equalTo(2));
// Finish both tasks
executor.get(0).task.markAsFailed(new RuntimeException());
executor.get(1).task.markAsCompleted();
String failedTaskId = executor.get(0).task.getPersistentTaskId();
String finishedTaskId = executor.get(1).task.getPersistentTaskId();
executor.clear();
// Add task on some other node
state = newClusterState;
newClusterState = addTask(state, TestPersistentTasksExecutor.NAME, null, "some_other_node");
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
// Make sure action wasn't called again
assertThat(executor.size(), equalTo(0));
// Simulate reallocation of the failed task on the same node
state = newClusterState;
newClusterState = reallocateTask(state, failedTaskId, "this_node");
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
// Simulate removal of the finished task
state = newClusterState;
newClusterState = removeTask(state, finishedTaskId);
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
// Make sure action was only allocated on this node once
assertThat(executor.size(), equalTo(1));
}
}
public void testParamsStatusAndNodeTaskAreDelegated() throws Exception {
PersistentTasksService persistentTasksService = mock(PersistentTasksService.class);
@SuppressWarnings("unchecked") PersistentTasksExecutor<TestParams> action = mock(PersistentTasksExecutor.class);
when(action.getExecutor()).thenReturn(ThreadPool.Names.SAME);
when(action.getTaskName()).thenReturn(TestPersistentTasksExecutor.NAME);
TaskId parentId = new TaskId("cluster", 1);
AllocatedPersistentTask nodeTask =
new TestPersistentTasksPlugin.TestTask(0, "persistent", "test", "", parentId, Collections.emptyMap());
when(action.createTask(anyLong(), anyString(), anyString(), eq(parentId), any(), any())).thenReturn(nodeTask);
PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action));
MockExecutor executor = new MockExecutor();
PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService,
registry, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet()), executor);
ClusterState state = createInitialClusterState(1, Settings.EMPTY);
Task.Status status = new TestPersistentTasksPlugin.Status("_test_phase");
PersistentTasksCustomMetaData.Builder tasks = PersistentTasksCustomMetaData.builder();
String taskId = UUIDs.base64UUID();
TestParams taskParams = new TestParams("other_0");
tasks.addTask(taskId, TestPersistentTasksExecutor.NAME, taskParams,
new Assignment("this_node", "test assignment on other node"));
tasks.updateTaskStatus(taskId, status);
MetaData.Builder metaData = MetaData.builder(state.metaData());
metaData.putCustom(PersistentTasksCustomMetaData.TYPE, tasks.build());
ClusterState newClusterState = ClusterState.builder(state).metaData(metaData).build();
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
assertThat(executor.size(), equalTo(1));
assertThat(executor.get(0).params, sameInstance(taskParams));
assertThat(executor.get(0).status, sameInstance(status));
assertThat(executor.get(0).task, sameInstance(nodeTask));
}
public void testTaskCancellation() {
AtomicLong capturedTaskId = new AtomicLong();
AtomicReference<ActionListener<CancelTasksResponse>> capturedListener = new AtomicReference<>();
PersistentTasksService persistentTasksService = new PersistentTasksService(Settings.EMPTY, null, null, null) {
@Override
public void sendTaskManagerCancellation(long taskId, ActionListener<CancelTasksResponse> listener) {
capturedTaskId.set(taskId);
capturedListener.set(listener);
}
@Override
public void sendCompletionNotification(String taskId, long allocationId, Exception failure,
ActionListener<PersistentTask<?>> listener) {
fail("Shouldn't be called during Cluster State cancellation");
}
};
@SuppressWarnings("unchecked") PersistentTasksExecutor<TestParams> action = mock(PersistentTasksExecutor.class);
when(action.getExecutor()).thenReturn(ThreadPool.Names.SAME);
when(action.getTaskName()).thenReturn("test");
when(action.createTask(anyLong(), anyString(), anyString(), any(), any(), any()))
.thenReturn(new TestPersistentTasksPlugin.TestTask(1, "persistent", "test", "", new TaskId("cluster", 1),
Collections.emptyMap()));
PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(Settings.EMPTY, Collections.singletonList(action));
int nonLocalNodesCount = randomInt(10);
MockExecutor executor = new MockExecutor();
TaskManager taskManager = new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet());
PersistentTasksNodeService coordinator = new PersistentTasksNodeService(Settings.EMPTY, persistentTasksService,
registry, taskManager, executor);
ClusterState state = createInitialClusterState(nonLocalNodesCount, Settings.EMPTY);
ClusterState newClusterState = state;
// Allocate first task
state = newClusterState;
newClusterState = addTask(state, "test", null, "this_node");
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
// Check that the task is known to the task manager
assertThat(taskManager.getTasks().size(), equalTo(1));
AllocatedPersistentTask runningTask = (AllocatedPersistentTask)taskManager.getTasks().values().iterator().next();
String persistentId = runningTask.getPersistentTaskId();
long localId = runningTask.getId();
// Make sure it returns correct status
Task.Status status = runningTask.getStatus();
assertThat(status.toString(), equalTo("{\"state\":\"STARTED\"}"));
state = newClusterState;
// Relocate the task to some other node or remove it completely
if (randomBoolean()) {
newClusterState = reallocateTask(state, persistentId, "some_other_node");
} else {
newClusterState = removeTask(state, persistentId);
}
coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state));
// Make sure it returns correct status
assertThat(taskManager.getTasks().size(), equalTo(1));
assertThat(taskManager.getTasks().values().iterator().next().getStatus().toString(), equalTo("{\"state\":\"PENDING_CANCEL\"}"));
// That should trigger cancellation request
assertThat(capturedTaskId.get(), equalTo(localId));
// Notify successful cancellation
capturedListener.get().onResponse(new CancelTasksResponse());
// finish or fail task
if (randomBoolean()) {
executor.get(0).task.markAsCompleted();
} else {
executor.get(0).task.markAsFailed(new IOException("test"));
}
// Check that the task is now removed from the task manager
assertThat(taskManager.getTasks().values(), empty());
}
private <Params extends PersistentTaskParams> ClusterState addTask(ClusterState state, String action, Params params,
String node) {
PersistentTasksCustomMetaData.Builder builder =
PersistentTasksCustomMetaData.builder(state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE));
return ClusterState.builder(state).metaData(MetaData.builder(state.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE,
builder.addTask(UUIDs.base64UUID(), action, params, new Assignment(node, "test assignment")).build())).build();
}
private ClusterState reallocateTask(ClusterState state, String taskId, String node) {
PersistentTasksCustomMetaData.Builder builder =
PersistentTasksCustomMetaData.builder(state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE));
assertTrue(builder.hasTask(taskId));
return ClusterState.builder(state).metaData(MetaData.builder(state.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE,
builder.reassignTask(taskId, new Assignment(node, "test assignment")).build())).build();
}
private ClusterState removeTask(ClusterState state, String taskId) {
PersistentTasksCustomMetaData.Builder builder =
PersistentTasksCustomMetaData.builder(state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE));
assertTrue(builder.hasTask(taskId));
return ClusterState.builder(state).metaData(MetaData.builder(state.metaData()).putCustom(PersistentTasksCustomMetaData.TYPE,
builder.removeTask(taskId).build())).build();
}
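// Captures the arguments of a single executeTask invocation for later verification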
private class Execution {
private final PersistentTaskParams params;
private final AllocatedPersistentTask task;
private final Task.Status status;
private final PersistentTasksExecutor<?> holder;
Execution(PersistentTaskParams params, AllocatedPersistentTask task, Task.Status status, PersistentTasksExecutor<?> holder) {
this.params = params;
this.task = task;
this.status = status;
this.holder = holder;
}
}
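// NodePersistentTasksExecutor stub that records executeTask calls instead of running them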
private class MockExecutor extends NodePersistentTasksExecutor {
private List<Execution> executions = new ArrayList<>();
MockExecutor() {
super(null);
}
@Override
public <Params extends PersistentTaskParams> void executeTask(Params params,
Task.Status status,
AllocatedPersistentTask task,
PersistentTasksExecutor<Params> executor) {
executions.add(new Execution(params, task, status, executor));
}
public Execution get(int i) {
return executions.get(i);
}
public int size() {
return executions.size();
}
public void clear() {
executions.clear();
}
}
}

View File

@ -1,22 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.core.persistent.CompletionPersistentTaskAction.Request;
public class RestartPersistentTaskRequestTests extends AbstractStreamableTestCase<Request> {
@Override
protected Request createTestInstance() {
return new Request(randomAlphaOfLength(10), randomLong(), null);
}
@Override
protected Request createBlankInstance() {
return new Request();
}
}

View File

@ -1,48 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.core.persistent.StartPersistentTaskAction.Request;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestParams;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import java.util.Collections;
public class StartPersistentActionRequestTests extends AbstractStreamableTestCase<Request> {
@Override
protected Request createTestInstance() {
TestParams testParams;
if (randomBoolean()) {
testParams = new TestParams();
if (randomBoolean()) {
testParams.setTestParam(randomAlphaOfLengthBetween(1, 20));
}
if (randomBoolean()) {
testParams.setExecutorNodeAttr(randomAlphaOfLengthBetween(1, 20));
}
} else {
testParams = null;
}
return new Request(UUIDs.base64UUID(), randomAlphaOfLengthBetween(1, 20), testParams);
}
@Override
protected Request createBlankInstance() {
return new Request();
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(Collections.singletonList(
new Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new)
));
}
}

View File

@ -1,540 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.action.support.tasks.TasksRequestBuilder;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskCancelledException;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import static java.util.Objects.requireNonNull;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.test.ESTestCase.awaitBusy;
import static org.elasticsearch.test.ESTestCase.randomBoolean;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* A plugin that adds a test persistent task.
*/
public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin {
@Override
public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
return Arrays.asList(
new ActionHandler<>(TestTaskAction.INSTANCE, TransportTestTaskAction.class),
new ActionHandler<>(StartPersistentTaskAction.INSTANCE, StartPersistentTaskAction.TransportAction.class),
new ActionHandler<>(UpdatePersistentTaskStatusAction.INSTANCE, UpdatePersistentTaskStatusAction.TransportAction.class),
new ActionHandler<>(CompletionPersistentTaskAction.INSTANCE, CompletionPersistentTaskAction.TransportAction.class),
new ActionHandler<>(RemovePersistentTaskAction.INSTANCE, RemovePersistentTaskAction.TransportAction.class)
);
}
@Override
public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
ResourceWatcherService resourceWatcherService, ScriptService scriptService,
NamedXContentRegistry xContentRegistry, Environment environment,
NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) {
PersistentTasksService persistentTasksService = new PersistentTasksService(Settings.EMPTY, clusterService, threadPool, client);
TestPersistentTasksExecutor testPersistentAction = new TestPersistentTasksExecutor(Settings.EMPTY, clusterService);
PersistentTasksExecutorRegistry persistentTasksExecutorRegistry = new PersistentTasksExecutorRegistry(Settings.EMPTY,
Collections.singletonList(testPersistentAction));
return Arrays.asList(
persistentTasksService,
persistentTasksExecutorRegistry,
new PersistentTasksClusterService(Settings.EMPTY, persistentTasksExecutorRegistry, clusterService)
);
}
@Override
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
return Arrays.asList(
new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new),
new NamedWriteableRegistry.Entry(Task.Status.class,
PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new),
new NamedWriteableRegistry.Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE,
PersistentTasksCustomMetaData::new),
new NamedWriteableRegistry.Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE,
PersistentTasksCustomMetaData::readDiffFrom),
new NamedWriteableRegistry.Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new)
);
}
@Override
public List<NamedXContentRegistry.Entry> getNamedXContent() {
return Arrays.asList(
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE),
PersistentTasksCustomMetaData::fromXContent),
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TestPersistentTasksExecutor.NAME),
TestParams::fromXContent),
new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TestPersistentTasksExecutor.NAME), Status::fromXContent)
);
}
public static class TestParams implements PersistentTaskParams {
public static final ConstructingObjectParser<TestParams, Void> REQUEST_PARSER =
new ConstructingObjectParser<>(TestPersistentTasksExecutor.NAME, args -> new TestParams((String) args[0]));
static {
REQUEST_PARSER.declareString(constructorArg(), new ParseField("param"));
}
private String executorNodeAttr = null;
private String responseNode = null;
private String testParam = null;
public TestParams() {
}
public TestParams(String testParam) {
this.testParam = testParam;
}
public TestParams(StreamInput in) throws IOException {
executorNodeAttr = in.readOptionalString();
responseNode = in.readOptionalString();
testParam = in.readOptionalString();
}
@Override
public String getWriteableName() {
return TestPersistentTasksExecutor.NAME;
}
public void setExecutorNodeAttr(String executorNodeAttr) {
this.executorNodeAttr = executorNodeAttr;
}
public void setTestParam(String testParam) {
this.testParam = testParam;
}
public String getExecutorNodeAttr() {
return executorNodeAttr;
}
public String getTestParam() {
return testParam;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(executorNodeAttr);
out.writeOptionalString(responseNode);
out.writeOptionalString(testParam);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("param", testParam);
builder.endObject();
return builder;
}
public static TestParams fromXContent(XContentParser parser) throws IOException {
return REQUEST_PARSER.parse(parser, null);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
TestParams that = (TestParams) o;
return Objects.equals(executorNodeAttr, that.executorNodeAttr) &&
Objects.equals(responseNode, that.responseNode) &&
Objects.equals(testParam, that.testParam);
}
@Override
public int hashCode() {
return Objects.hash(executorNodeAttr, responseNode, testParam);
}
}
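    // A minimal, illustrative round-trip of the TestParams wire format (editorial sketch, not part of
    // the original plugin; assumes a test context with
    // org.elasticsearch.common.io.stream.BytesStreamOutput available):
    //
    //     TestParams original = new TestParams("foo");
    //     try (BytesStreamOutput out = new BytesStreamOutput()) {
    //         original.writeTo(out);
    //         TestParams copy = new TestParams(out.bytes().streamInput());
    //         assert copy.equals(original);   // equals() compares executorNodeAttr, responseNode and testParam
    //     }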
public static class Status implements Task.Status {
private final String phase;
public static final ConstructingObjectParser<Status, Void> STATUS_PARSER =
new ConstructingObjectParser<>(TestPersistentTasksExecutor.NAME, args -> new Status((String) args[0]));
static {
STATUS_PARSER.declareString(constructorArg(), new ParseField("phase"));
}
public Status(String phase) {
this.phase = requireNonNull(phase, "Phase cannot be null");
}
public Status(StreamInput in) throws IOException {
phase = in.readString();
}
@Override
public String getWriteableName() {
return TestPersistentTasksExecutor.NAME;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("phase", phase);
builder.endObject();
return builder;
}
public static Task.Status fromXContent(XContentParser parser) throws IOException {
return STATUS_PARSER.parse(parser, null);
}
@Override
public boolean isFragment() {
return false;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(phase);
}
@Override
public String toString() {
return Strings.toString(this);
}
// Implements equals and hashcode for testing
@Override
public boolean equals(Object obj) {
if (obj == null || obj.getClass() != Status.class) {
return false;
}
Status other = (Status) obj;
return phase.equals(other.phase);
}
@Override
public int hashCode() {
return phase.hashCode();
}
}
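    // For reference (illustrative note, not in the original source): Status#toXContent above renders a
    // single "phase" field, e.g. {"phase":"phase 1"}, and Status.fromXContent parses that same document
    // back through STATUS_PARSER.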
public static class TestPersistentTasksExecutor extends PersistentTasksExecutor<TestParams> {
public static final String NAME = "cluster:admin/persistent/test";
private final ClusterService clusterService;
public TestPersistentTasksExecutor(Settings settings, ClusterService clusterService) {
super(settings, NAME, ThreadPool.Names.GENERIC);
this.clusterService = clusterService;
}
@Override
public Assignment getAssignment(TestParams params, ClusterState clusterState) {
if (params == null || params.getExecutorNodeAttr() == null) {
return super.getAssignment(params, clusterState);
} else {
DiscoveryNode executorNode = selectLeastLoadedNode(clusterState,
discoveryNode -> params.getExecutorNodeAttr().equals(discoveryNode.getAttributes().get("test_attr")));
if (executorNode != null) {
return new Assignment(executorNode.getId(), "test assignment");
} else {
return NO_NODE_FOUND;
}
}
}
@Override
protected void nodeOperation(AllocatedPersistentTask task, TestParams params, Task.Status status) {
logger.info("started node operation for the task {}", task);
try {
TestTask testTask = (TestTask) task;
AtomicInteger phase = new AtomicInteger();
while (true) {
// wait for something to happen
assertTrue(awaitBusy(() -> testTask.isCancelled() ||
testTask.getOperation() != null ||
clusterService.lifecycleState() != Lifecycle.State.STARTED, // speedup finishing on closed nodes
30, TimeUnit.SECONDS)); // This can take a while during large cluster restart
if (clusterService.lifecycleState() != Lifecycle.State.STARTED) {
return;
}
if ("finish".equals(testTask.getOperation())) {
task.markAsCompleted();
return;
} else if ("fail".equals(testTask.getOperation())) {
task.markAsFailed(new RuntimeException("Simulating failure"));
return;
} else if ("update_status".equals(testTask.getOperation())) {
testTask.setOperation(null);
CountDownLatch latch = new CountDownLatch(1);
Status newStatus = new Status("phase " + phase.incrementAndGet());
logger.info("updating the task status to {}", newStatus);
task.updatePersistentStatus(newStatus, new ActionListener<PersistentTask<?>>() {
@Override
public void onResponse(PersistentTask<?> persistentTask) {
logger.info("updating was successful");
latch.countDown();
}
@Override
public void onFailure(Exception e) {
logger.info("updating failed", e);
latch.countDown();
fail(e.toString());
}
});
assertTrue(latch.await(10, TimeUnit.SECONDS));
} else if (testTask.isCancelled()) {
// Cancellation may cause the task to finish in different ways
if (randomBoolean()) {
if (randomBoolean()) {
task.markAsFailed(new TaskCancelledException(testTask.getReasonCancelled()));
} else {
task.markAsCompleted();
}
} else {
task.markAsFailed(new RuntimeException(testTask.getReasonCancelled()));
}
return;
} else {
fail("We really shouldn't be here");
}
}
} catch (InterruptedException e) {
task.markAsFailed(e);
}
}
@Override
protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId,
PersistentTask<TestParams> task, Map<String, String> headers) {
return new TestTask(id, type, action, getDescription(task), parentTaskId, headers);
}
}
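    // Illustrative only: getAssignment above matches TestParams#executorNodeAttr against the node
    // attribute "test_attr", so a test that wants to pin the task to a particular node could start
    // that node with a setting along these lines (hypothetical test wiring, not in this file):
    //
    //     Settings nodeSettings = Settings.builder()
    //             .put("node.attr.test_attr", "target")
    //             .build();
    //
    // and call params.setExecutorNodeAttr("target") before starting the persistent task.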
public static class TestTaskAction extends Action<TestTasksRequest, TestTasksResponse, TestTasksRequestBuilder> {
public static final TestTaskAction INSTANCE = new TestTaskAction();
public static final String NAME = "cluster:admin/persistent/task_test";
private TestTaskAction() {
super(NAME);
}
@Override
public TestTasksResponse newResponse() {
return new TestTasksResponse();
}
@Override
public TestTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new TestTasksRequestBuilder(client);
}
}
public static class TestTask extends AllocatedPersistentTask {
private volatile String operation;
public TestTask(long id, String type, String action, String description, TaskId parentTask, Map<String, String> headers) {
super(id, type, action, description, parentTask, headers);
}
public String getOperation() {
return operation;
}
public void setOperation(String operation) {
this.operation = operation;
}
@Override
public String toString() {
return "TestTask[" + this.getId() + ", " + this.getParentTaskId() + ", " + this.getOperation() + "]";
}
}
static class TestTaskResponse implements Writeable {
TestTaskResponse() {
}
TestTaskResponse(StreamInput in) throws IOException {
in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(true);
}
}
public static class TestTasksRequest extends BaseTasksRequest<TestTasksRequest> {
private String operation;
public TestTasksRequest() {
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
operation = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeOptionalString(operation);
}
public void setOperation(String operation) {
this.operation = operation;
}
public String getOperation() {
return operation;
}
}
public static class TestTasksRequestBuilder extends TasksRequestBuilder<TestTasksRequest, TestTasksResponse, TestTasksRequestBuilder> {
protected TestTasksRequestBuilder(ElasticsearchClient client) {
super(client, TestTaskAction.INSTANCE, new TestTasksRequest());
}
public TestTasksRequestBuilder setOperation(String operation) {
request.setOperation(operation);
return this;
}
}
public static class TestTasksResponse extends BaseTasksResponse {
private List<TestTaskResponse> tasks;
public TestTasksResponse() {
super(null, null);
}
public TestTasksResponse(List<TestTaskResponse> tasks, List<TaskOperationFailure> taskFailures,
List<? extends FailedNodeException> nodeFailures) {
super(taskFailures, nodeFailures);
this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks));
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
tasks = in.readList(TestTaskResponse::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeList(tasks);
}
public List<TestTaskResponse> getTasks() {
return tasks;
}
}
public static class TransportTestTaskAction extends TransportTasksAction<TestTask,
TestTasksRequest, TestTasksResponse, TestTaskResponse> {
@Inject
public TransportTestTaskAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, String nodeExecutor) {
super(settings, TestTaskAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
TestTasksRequest::new, TestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
}
@Override
protected TestTasksResponse newResponse(TestTasksRequest request, List<TestTaskResponse> tasks,
List<TaskOperationFailure> taskOperationFailures,
List<FailedNodeException> failedNodeExceptions) {
return new TestTasksResponse(tasks, taskOperationFailures, failedNodeExceptions);
}
@Override
protected TestTaskResponse readTaskResponse(StreamInput in) throws IOException {
return new TestTaskResponse(in);
}
@Override
protected void taskOperation(TestTasksRequest request, TestTask task, ActionListener<TestTaskResponse> listener) {
task.setOperation(request.operation);
listener.onResponse(new TestTaskResponse());
}
}
}
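A rough sketch of how a test might drive the branches of nodeOperation above, assuming an ESIntegTestCase-style client() helper and a task that was already started through PersistentTasksService (both are assumptions, not shown in this file):
TestTaskAction.INSTANCE.newRequestBuilder(client())
        .setOperation("update_status")   // nodeOperation bumps the phase and persists a new Status
        .get();
TestTasksResponse finished = TestTaskAction.INSTANCE.newRequestBuilder(client())
        .setOperation("finish")          // nodeOperation calls markAsCompleted() and returns
        .get();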

View File

@@ -1,36 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.persistent;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.Status;
import org.elasticsearch.xpack.core.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
import org.elasticsearch.xpack.core.persistent.UpdatePersistentTaskStatusAction.Request;
import java.util.Collections;
public class UpdatePersistentTaskRequestTests extends AbstractStreamableTestCase<Request> {
@Override
protected Request createTestInstance() {
return new Request(UUIDs.base64UUID(), randomLong(), new Status(randomAlphaOfLength(10)));
}
@Override
protected Request createBlankInstance() {
return new Request();
}
@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(Collections.singletonList(
new NamedWriteableRegistry.Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new)
));
}
}

View File

@@ -41,6 +41,7 @@ import org.elasticsearch.monitor.os.OsProbe;
import org.elasticsearch.monitor.os.OsStats;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.PersistentTaskPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
@@ -101,14 +102,14 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings;
import org.elasticsearch.xpack.core.ml.notifications.AuditMessage;
import org.elasticsearch.xpack.core.ml.notifications.AuditorField;
import org.elasticsearch.xpack.core.persistent.CompletionPersistentTaskAction;
import org.elasticsearch.xpack.core.persistent.PersistentTasksClusterService;
import org.elasticsearch.xpack.core.persistent.PersistentTasksExecutor;
import org.elasticsearch.xpack.core.persistent.PersistentTasksExecutorRegistry;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.xpack.core.persistent.RemovePersistentTaskAction;
import org.elasticsearch.xpack.core.persistent.StartPersistentTaskAction;
import org.elasticsearch.xpack.core.persistent.UpdatePersistentTaskStatusAction;
import org.elasticsearch.persistent.CompletionPersistentTaskAction;
import org.elasticsearch.persistent.PersistentTasksClusterService;
import org.elasticsearch.persistent.PersistentTasksExecutor;
import org.elasticsearch.persistent.PersistentTasksExecutorRegistry;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.persistent.RemovePersistentTaskAction;
import org.elasticsearch.persistent.StartPersistentTaskAction;
import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction;
import org.elasticsearch.xpack.core.template.TemplateUtils;
import org.elasticsearch.xpack.ml.action.TransportCloseJobAction;
import org.elasticsearch.xpack.ml.action.TransportDeleteCalendarAction;
@@ -233,7 +234,7 @@ import java.util.function.UnaryOperator;
import static java.util.Collections.emptyList;
public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlugin {
public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlugin, PersistentTaskPlugin {
public static final String NAME = "ml";
public static final String BASE_PATH = "/_xpack/ml/";
public static final String DATAFEED_THREAD_POOL_NAME = NAME + "_datafeed";
@@ -352,33 +353,6 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu
ResourceWatcherService resourceWatcherService, ScriptService scriptService,
NamedXContentRegistry xContentRegistry, Environment environment,
NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) {
List<Object> components = new ArrayList<>();
PersistentTasksService persistentTasksService = new PersistentTasksService(settings, clusterService, threadPool, client);
components.addAll(createComponents(client, clusterService, threadPool, xContentRegistry, environment));
// This was lifted from XPackPlugin's createComponents when it was split.
// This is not extensible; anyone copying this code needs to instead make it work
// using the same single service (at the time of this writing, XPackPlugin was the place these common things got created)
// rather than just copying this whole block and dropping it into their own service.
// The Actions for this service will also have to be moved back into XPackPlugin
List<PersistentTasksExecutor<?>> tasksExecutors = new ArrayList<>();
tasksExecutors.addAll(createPersistentTasksExecutors(clusterService));
PersistentTasksExecutorRegistry registry = new PersistentTasksExecutorRegistry(settings, tasksExecutors);
PersistentTasksClusterService persistentTasksClusterService = new PersistentTasksClusterService(settings, registry, clusterService);
components.add(persistentTasksClusterService);
components.add(persistentTasksService);
components.add(registry);
return components;
}
// TODO: once initialization of the PersistentTasksClusterService, PersistentTasksService
// and PersistentTasksExecutorRegistry has been moved somewhere else the entire contents of
// this method can replace the entire contents of the overridden createComponents() method
private Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
NamedXContentRegistry xContentRegistry, Environment environment) {
if (enabled == false || transportClientMode) {
return emptyList();
}
@@ -443,7 +417,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu
);
}
public List<PersistentTasksExecutor<?>> createPersistentTasksExecutors(ClusterService clusterService) {
public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(ClusterService clusterService) {
if (enabled == false || transportClientMode) {
return emptyList();
}
@@ -570,14 +544,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu
new ActionHandler<>(DeleteCalendarEventAction.INSTANCE, TransportDeleteCalendarEventAction.class),
new ActionHandler<>(UpdateCalendarJobAction.INSTANCE, TransportUpdateCalendarJobAction.class),
new ActionHandler<>(GetCalendarEventsAction.INSTANCE, TransportGetCalendarEventsAction.class),
new ActionHandler<>(PostCalendarEventsAction.INSTANCE, TransportPostCalendarEventsAction.class),
// These actions reside here because ML is the only user of the Persistence service currently.
// Once another project uses this service, these actions will need to be moved out to a common place
// where they are registered.
new ActionHandler<>(StartPersistentTaskAction.INSTANCE, StartPersistentTaskAction.TransportAction.class),
new ActionHandler<>(UpdatePersistentTaskStatusAction.INSTANCE, UpdatePersistentTaskStatusAction.TransportAction.class),
new ActionHandler<>(RemovePersistentTaskAction.INSTANCE, RemovePersistentTaskAction.TransportAction.class),
new ActionHandler<>(CompletionPersistentTaskAction.INSTANCE, CompletionPersistentTaskAction.TransportAction.class)
new ActionHandler<>(PostCalendarEventsAction.INSTANCE, TransportPostCalendarEventsAction.class)
);
}
@Override

View File

@@ -18,9 +18,9 @@ import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.action.OpenJobAction;
import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import java.util.Objects;

View File

@@ -38,8 +38,8 @@ import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.notifications.Auditor;

View File

@@ -25,8 +25,8 @@ import org.elasticsearch.xpack.core.ml.MLMetadataField;
import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction;
import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksService;
import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;

View File

@@ -33,8 +33,8 @@ import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.action.DeleteJobAction;
import org.elasticsearch.xpack.core.ml.action.KillProcessAction;
import org.elasticsearch.xpack.core.ml.job.persistence.JobStorageDeletionTask;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.xpack.ml.job.JobManager;
import java.util.concurrent.TimeoutException;

View File

@@ -24,7 +24,7 @@ import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction;
import org.elasticsearch.xpack.core.ml.action.util.QueryPage;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import java.util.List;
import java.util.Set;

View File

@@ -31,7 +31,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager;

View File

@@ -24,7 +24,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.MachineLearning;
import java.io.IOException;

View File

@@ -22,7 +22,7 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.action.JobTaskRequest;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager;

View File

@@ -23,7 +23,7 @@ import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.action.KillProcessAction;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager;
import org.elasticsearch.xpack.ml.notifications.Auditor;

View File

@@ -56,10 +56,10 @@ import org.elasticsearch.xpack.core.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.AllocatedPersistentTask;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksExecutor;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.persistent.AllocatedPersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksExecutor;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager;

View File

@@ -38,10 +38,10 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.AllocatedPersistentTask;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksExecutor;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.persistent.AllocatedPersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksExecutor;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.datafeed.DatafeedManager;
import org.elasticsearch.xpack.ml.datafeed.DatafeedNodeSelector;

View File

@@ -33,8 +33,8 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.xpack.ml.MachineLearning;
import java.io.IOException;

View File

@@ -25,7 +25,7 @@ import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction;
import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedUpdate;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
public class TransportUpdateDatafeedAction extends TransportMasterNodeAction<UpdateDatafeedAction.Request, PutDatafeedAction.Response> {

View File

@@ -29,14 +29,11 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.messages.Messages;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import java.util.ArrayList;
import java.util.Iterator;
@@ -53,7 +50,7 @@ import java.util.function.Supplier;
import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.core.persistent.PersistentTasksService.WaitForPersistentTaskStatusListener;
import static org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskStatusListener;
public class DatafeedManager extends AbstractComponent {

View File

@@ -18,7 +18,7 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import java.util.List;
import java.util.Objects;

View File

@@ -43,7 +43,7 @@ import org.elasticsearch.xpack.core.ml.job.persistence.JobStorageDeletionTask;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister;
import org.elasticsearch.xpack.ml.job.process.autodetect.UpdateParams;

View File

@@ -36,7 +36,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot;
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.action.TransportOpenJobAction.JobTask;
import org.elasticsearch.xpack.ml.job.JobManager;

View File

@@ -31,7 +31,7 @@ import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction;
import org.elasticsearch.xpack.core.ml.client.MachineLearningClient;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.LocalStateMachineLearning;
import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
import org.junit.Before;

View File

@@ -16,7 +16,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import java.net.InetAddress;

View File

@@ -46,7 +46,7 @@ public class MlDailyManagementServiceTests extends ESTestCase {
CountDownLatch latch = new CountDownLatch(triggerCount);
try (MlDailyMaintenanceService service = createService(latch, client)) {
service.start();
latch.await(1, TimeUnit.SECONDS);
latch.await(5, TimeUnit.SECONDS);
}
verify(client, Mockito.atLeast(triggerCount - 1)).execute(same(DeleteExpiredDataAction.INSTANCE), any(), any());

View File

@@ -29,7 +29,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.ml.job.config.JobTests;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests;
import java.util.Collections;
@@ -37,7 +37,7 @@ import java.util.Date;
import java.util.Map;
import static org.elasticsearch.xpack.core.ml.job.config.JobTests.buildJobBuilder;
import static org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT;
import static org.elasticsearch.persistent.PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT;
import static org.elasticsearch.xpack.ml.action.TransportOpenJobActionTests.addJobTask;
import static org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests.createDatafeedConfig;
import static org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests.createDatafeedJob;

View File

@@ -28,9 +28,9 @@ import org.elasticsearch.xpack.core.ml.action.CloseJobAction.Request;
import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.core.persistent.PersistentTasksService;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;

View File

@@ -41,8 +41,8 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields;
import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings;
import org.elasticsearch.xpack.core.ml.notifications.AuditorField;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;

View File

@@ -14,14 +14,14 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.datafeed.DatafeedManager;
import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests;
import java.util.Collections;
import java.util.Date;
import static org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT;
import static org.elasticsearch.persistent.PersistentTasksCustomMetaData.INITIAL_ASSIGNMENT;
import static org.elasticsearch.xpack.ml.action.TransportOpenJobActionTests.addJobTask;
import static org.hamcrest.Matchers.equalTo;

View File

@@ -14,7 +14,7 @@ import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
import java.util.ArrayList;

View File

@@ -35,8 +35,8 @@ import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.notifications.AuditMessage;
import org.elasticsearch.xpack.core.ml.notifications.AuditorField;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.action.TransportStartDatafeedActionTests;
import org.elasticsearch.xpack.ml.action.TransportStartDatafeedAction.DatafeedTask;

View File

@@ -32,7 +32,7 @@ import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.junit.Before;
import java.net.InetAddress;

View File

@@ -38,8 +38,8 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;

View File

@@ -15,7 +15,7 @@ import org.elasticsearch.xpack.core.ml.MlMetadata;
import org.elasticsearch.xpack.core.ml.action.DeleteJobAction;
import org.elasticsearch.xpack.core.ml.action.PutJobAction;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
import java.util.concurrent.CountDownLatch;

View File

@@ -33,8 +33,8 @@ import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
import java.io.IOException;

View File

@@ -21,7 +21,7 @@ import org.elasticsearch.xpack.core.ml.action.PutJobAction;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager;
import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;

View File

@@ -33,7 +33,7 @@ import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.job.config.JobState;
import org.elasticsearch.xpack.core.ml.job.config.MlFilter;
import org.elasticsearch.xpack.core.ml.job.config.RuleCondition;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.ml.job.categorization.CategorizationAnalyzerTests;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.process.autodetect.UpdateParams;

View File

@@ -424,6 +424,9 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
components.add(realms);
components.add(reservedRealm);
securityLifecycleService.addSecurityIndexHealthChangeListener(nativeRoleMappingStore::onSecurityIndexHealthChange);
securityLifecycleService.addSecurityIndexOutOfDateListener(nativeRoleMappingStore::onSecurityIndexOutOfDateChange);
AuthenticationFailureHandler failureHandler = null;
String extensionName = null;
for (SecurityExtension extension : securityExtensions) {

View File

@@ -11,6 +11,7 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
@@ -214,4 +215,21 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
public boolean isSecurityIndexOutOfDate() {
return securityIndex.isIndexUpToDate() == false;
}
/**
* Is the move from {@code previousHealth} to {@code currentHealth} a move from an unhealthy ("RED") index state to a healthy
* ("non-RED") state.
*/
public static boolean isMoveFromRedToNonRed(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) {
return (previousHealth == null || previousHealth.getStatus() == ClusterHealthStatus.RED)
&& currentHealth != null && currentHealth.getStatus() != ClusterHealthStatus.RED;
}
/**
* Is the move from {@code previousHealth} to {@code currentHealth} a move from index-exists to index-deleted
*/
public static boolean isIndexDeleted(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) {
return previousHealth != null && currentHealth == null;
}
}
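A quick sketch of how the two new predicates behave, written in the test style used further below and assuming the SecurityTestUtils.getClusterIndexHealth helper that NativeRoleMappingStoreTests imports (the helper and the JUnit assertTrue/assertFalse statics are assumptions for this sketch, not part of the committed file):
ClusterIndexHealth red = getClusterIndexHealth(ClusterHealthStatus.RED);
ClusterIndexHealth green = getClusterIndexHealth(ClusterHealthStatus.GREEN);
assertTrue(SecurityLifecycleService.isMoveFromRedToNonRed(red, green));   // recovered from RED
assertTrue(SecurityLifecycleService.isMoveFromRedToNonRed(null, green));  // index appeared for the first time
assertTrue(SecurityLifecycleService.isIndexDeleted(green, null));         // index no longer present
assertFalse(SecurityLifecycleService.isMoveFromRedToNonRed(green, red));  // turning RED is not a recovery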

View File

@@ -12,6 +12,7 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractComponent;
@@ -58,6 +59,8 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.isIndexDeleted;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.isMoveFromRedToNonRed;
/**
* This store reads + writes {@link ExpressionRoleMapping role mappings} in an Elasticsearch
@@ -79,6 +82,18 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
private static final String SECURITY_GENERIC_TYPE = "doc";
private static final ActionListener<Object> NO_OP_ACTION_LISTENER = new ActionListener<Object>() {
@Override
public void onResponse(Object o) {
// nothing
}
@Override
public void onFailure(Exception e) {
// nothing
}
};
private final Client client;
private final SecurityLifecycleService securityLifecycleService;
private final List<String> realmsToRefresh = new CopyOnWriteArrayList<>();
@@ -301,6 +316,17 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
listener.onResponse(usageStats);
}
public void onSecurityIndexHealthChange(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) {
if (isMoveFromRedToNonRed(previousHealth, currentHealth) || isIndexDeleted(previousHealth, currentHealth)) {
refreshRealms(NO_OP_ACTION_LISTENER, null);
}
}
public void onSecurityIndexOutOfDateChange(boolean prevOutOfDate, boolean outOfDate) {
assert prevOutOfDate != outOfDate : "this method should only be called if the two values are different";
refreshRealms(NO_OP_ACTION_LISTENER, null);
}
private <Result> void refreshRealms(ActionListener<Result> listener, Result result) {
String[] realmNames = this.realmsToRefresh.toArray(new String[realmsToRefresh.size()]);
final SecurityClient securityClient = new SecurityClient(client);

View File

@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.authz.store;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
@@ -34,6 +33,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;
import org.elasticsearch.xpack.core.security.authz.privilege.Privilege;
import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import java.util.ArrayList;
import java.util.Arrays;
@@ -53,6 +53,8 @@ import java.util.function.BiConsumer;
import java.util.stream.Collectors;
import static org.elasticsearch.xpack.core.security.SecurityField.setting;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.isIndexDeleted;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.isMoveFromRedToNonRed;
/**
* A composite roles store that combines built in roles, file-based roles, and index-based roles. Checks the built in roles first, then the
@@ -322,11 +324,7 @@ public class CompositeRolesStore extends AbstractComponent {
}
public void onSecurityIndexHealthChange(ClusterIndexHealth previousHealth, ClusterIndexHealth currentHealth) {
final boolean movedFromRedToNonRed = (previousHealth == null || previousHealth.getStatus() == ClusterHealthStatus.RED)
&& currentHealth != null && currentHealth.getStatus() != ClusterHealthStatus.RED;
final boolean indexDeleted = previousHealth != null && currentHealth == null;
if (movedFromRedToNonRed || indexDeleted) {
if (isMoveFromRedToNonRed(previousHealth, currentHealth) || isIndexDeleted(previousHealth, currentHealth)) {
invalidateAll();
}
}

View File

@@ -0,0 +1,211 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc.support.mapper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction;
import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest;
import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping;
import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression;
import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue;
import org.elasticsearch.xpack.core.security.user.User;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
import org.hamcrest.Matchers;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static org.elasticsearch.xpack.security.test.SecurityTestUtils.getClusterIndexHealth;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class NativeRoleMappingStoreTests extends ESTestCase {
public void testResolveRoles() throws Exception {
// Does match DN
final ExpressionRoleMapping mapping1 = new ExpressionRoleMapping("dept_h",
new FieldExpression("dn", Collections.singletonList(new FieldValue("*,ou=dept_h,o=forces,dc=gc,dc=ca"))),
Arrays.asList("dept_h", "defence"), Collections.emptyMap(), true);
// Does not match - user is not in this group
final ExpressionRoleMapping mapping2 = new ExpressionRoleMapping("admin",
new FieldExpression("groups", Collections.singletonList(
new FieldValue(randomiseDn("cn=esadmin,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))),
Arrays.asList("admin"), Collections.emptyMap(), true);
// Does match - user is one of these groups
final ExpressionRoleMapping mapping3 = new ExpressionRoleMapping("flight",
new FieldExpression("groups", Arrays.asList(
new FieldValue(randomiseDn("cn=alphaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")),
new FieldValue(randomiseDn("cn=betaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")),
new FieldValue(randomiseDn("cn=gammaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca"))
)),
Arrays.asList("flight"), Collections.emptyMap(), true);
// Does not match - mapping is not enabled
final ExpressionRoleMapping mapping4 = new ExpressionRoleMapping("mutants",
new FieldExpression("groups", Collections.singletonList(
new FieldValue(randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))),
Arrays.asList("mutants"), Collections.emptyMap(), false);
final Client client = mock(Client.class);
final SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class);
when(lifecycleService.isSecurityIndexAvailable()).thenReturn(true);
final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, lifecycleService) {
@Override
protected void loadMappings(ActionListener<List<ExpressionRoleMapping>> listener) {
final List<ExpressionRoleMapping> mappings = Arrays.asList(mapping1, mapping2, mapping3, mapping4);
logger.info("Role mappings are: [{}]", mappings);
listener.onResponse(mappings);
}
};
final RealmConfig realm = new RealmConfig("ldap1", Settings.EMPTY, Settings.EMPTY, mock(Environment.class),
new ThreadContext(Settings.EMPTY));
final PlainActionFuture<Set<String>> future = new PlainActionFuture<>();
final UserRoleMapper.UserData user = new UserRoleMapper.UserData("sasquatch",
randomiseDn("cn=walter.langowski,ou=people,ou=dept_h,o=forces,dc=gc,dc=ca"),
Arrays.asList(
randomiseDn("cn=alphaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca"),
randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")
), Collections.emptyMap(), realm);
logger.info("UserData is [{}]", user);
store.resolveRoles(user, future);
final Set<String> roles = future.get();
assertThat(roles, Matchers.containsInAnyOrder("dept_h", "defence", "flight"));
}
private String randomiseDn(String dn) {
// Randomly transform the dn into another valid form that is logically identical,
// but (potentially) textually different
switch (randomIntBetween(0, 3)) {
case 0:
// do nothing
return dn;
case 1:
return dn.toUpperCase(Locale.ROOT);
case 2:
// Upper case just the attribute name for each RDN
return Arrays.stream(dn.split(",")).map(s -> {
final String[] arr = s.split("=");
arr[0] = arr[0].toUpperCase(Locale.ROOT);
return String.join("=", arr);
}).collect(Collectors.joining(","));
case 3:
return dn.replaceAll(",", ", ");
}
return dn;
}
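    // Illustrative examples of the transformations above (editorial note, not in the original source):
    //   case 1: "cn=esadmin,ou=groups"  ->  "CN=ESADMIN,OU=GROUPS"
    //   case 2: "cn=esadmin,ou=groups"  ->  "CN=esadmin,OU=groups"   (attribute names only)
    //   case 3: "cn=esadmin,ou=groups"  ->  "cn=esadmin, ou=groups"  (space after each comma)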
public void testCacheClearOnIndexHealthChange() {
final AtomicInteger numInvalidation = new AtomicInteger(0);
final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation);
int expectedInvalidation = 0;
// existing to no longer present
ClusterIndexHealth previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW));
ClusterIndexHealth currentHealth = null;
store.onSecurityIndexHealthChange(previousHealth, currentHealth);
assertEquals(++expectedInvalidation, numInvalidation.get());
// doesn't exist to exists
previousHealth = null;
currentHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW));
store.onSecurityIndexHealthChange(previousHealth, currentHealth);
assertEquals(++expectedInvalidation, numInvalidation.get());
// green or yellow to red
previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW));
currentHealth = getClusterIndexHealth(ClusterHealthStatus.RED);
store.onSecurityIndexHealthChange(previousHealth, currentHealth);
assertEquals(expectedInvalidation, numInvalidation.get());
// red to non red
previousHealth = getClusterIndexHealth(ClusterHealthStatus.RED);
currentHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW));
store.onSecurityIndexHealthChange(previousHealth, currentHealth);
assertEquals(++expectedInvalidation, numInvalidation.get());
// green to yellow or yellow to green
previousHealth = getClusterIndexHealth(randomFrom(ClusterHealthStatus.GREEN, ClusterHealthStatus.YELLOW));
currentHealth = getClusterIndexHealth(
previousHealth.getStatus() == ClusterHealthStatus.GREEN ? ClusterHealthStatus.YELLOW : ClusterHealthStatus.GREEN);
store.onSecurityIndexHealthChange(previousHealth, currentHealth);
assertEquals(expectedInvalidation, numInvalidation.get());
}
public void testCacheClearOnIndexOutOfDateChange() {
final AtomicInteger numInvalidation = new AtomicInteger(0);
final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation);
store.onSecurityIndexOutOfDateChange(false, true);
assertEquals(1, numInvalidation.get());
store.onSecurityIndexOutOfDateChange(true, false);
assertEquals(2, numInvalidation.get());
}
private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(AtomicInteger invalidationCounter) {
final Settings settings = Settings.builder().put("path.home", createTempDir()).build();
final ThreadPool threadPool = mock(ThreadPool.class);
final ThreadContext threadContext = new ThreadContext(settings);
when(threadPool.getThreadContext()).thenReturn(threadContext);
final Client client = mock(Client.class);
when(client.threadPool()).thenReturn(threadPool);
when(client.settings()).thenReturn(settings);
doAnswer(invocationOnMock -> {
ActionListener<ClearRealmCacheResponse> listener = (ActionListener<ClearRealmCacheResponse>) invocationOnMock.getArguments()[2];
invalidationCounter.incrementAndGet();
listener.onResponse(new ClearRealmCacheResponse(new ClusterName("cluster"), Collections.emptyList(), Collections.emptyList()));
return null;
}).when(client).execute(eq(ClearRealmCacheAction.INSTANCE), any(ClearRealmCacheRequest.class), any(ActionListener.class));
final Environment env = TestEnvironment.newEnvironment(settings);
final RealmConfig realmConfig = new RealmConfig(getTestName(), Settings.EMPTY, settings, env, threadContext);
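// Minimal realm stub - authentication and user lookup are never exercised by these tests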
final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm("test", realmConfig) {
@Override
protected void doAuthenticate(UsernamePasswordToken token, ActionListener<AuthenticationResult> listener) {
listener.onResponse(AuthenticationResult.notHandled());
}
@Override
protected void doLookupUser(String username, ActionListener<User> listener) {
listener.onResponse(null);
}
};
final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityLifecycleService.class));
store.refreshRealmOnChange(mockRealm);
return store;
}
}

View File

@ -1,111 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security.authc.support.mapper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping;
import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression;
import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
import org.hamcrest.Matchers;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.stream.Collectors;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class NativeUserRoleMapperTests extends ESTestCase {
public void testResolveRoles() throws Exception {
// Does match DN
final ExpressionRoleMapping mapping1 = new ExpressionRoleMapping("dept_h",
new FieldExpression("dn", Collections.singletonList(new FieldValue("*,ou=dept_h,o=forces,dc=gc,dc=ca"))),
Arrays.asList("dept_h", "defence"), Collections.emptyMap(), true);
// Does not match - user is not in this group
final ExpressionRoleMapping mapping2 = new ExpressionRoleMapping("admin",
new FieldExpression("groups", Collections.singletonList(
new FieldValue(randomiseDn("cn=esadmin,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))),
Arrays.asList("admin"), Collections.emptyMap(), true);
// Does match - user is one of these groups
final ExpressionRoleMapping mapping3 = new ExpressionRoleMapping("flight",
new FieldExpression("groups", Arrays.asList(
new FieldValue(randomiseDn("cn=alphaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")),
new FieldValue(randomiseDn("cn=betaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")),
new FieldValue(randomiseDn("cn=gammaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca"))
)),
Arrays.asList("flight"), Collections.emptyMap(), true);
// Does not match - mapping is not enabled
final ExpressionRoleMapping mapping4 = new ExpressionRoleMapping("mutants",
new FieldExpression("groups", Collections.singletonList(
new FieldValue(randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))),
Arrays.asList("mutants"), Collections.emptyMap(), false);
final Client client = mock(Client.class);
final SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class);
when(lifecycleService.isSecurityIndexAvailable()).thenReturn(true);
final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, lifecycleService) {
@Override
protected void loadMappings(ActionListener<List<ExpressionRoleMapping>> listener) {
final List<ExpressionRoleMapping> mappings = Arrays.asList(mapping1, mapping2, mapping3, mapping4);
logger.info("Role mappings are: [{}]", mappings);
listener.onResponse(mappings);
}
};
final RealmConfig realm = new RealmConfig("ldap1", Settings.EMPTY, Settings.EMPTY, mock(Environment.class),
new ThreadContext(Settings.EMPTY));
final PlainActionFuture<Set<String>> future = new PlainActionFuture<>();
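// User whose DN matches mapping1 and whose groups match mapping3 (alphaflight) and mapping4 (disabled)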
final UserRoleMapper.UserData user = new UserRoleMapper.UserData("sasquatch",
randomiseDn("cn=walter.langowski,ou=people,ou=dept_h,o=forces,dc=gc,dc=ca"),
Arrays.asList(
randomiseDn("cn=alphaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca"),
randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")
), Collections.emptyMap(), realm);
logger.info("UserData is [{}]", user);
store.resolveRoles(user, future);
final Set<String> roles = future.get();
assertThat(roles, Matchers.containsInAnyOrder("dept_h", "defence", "flight"));
}
private String randomiseDn(String dn) {
// Randomly transform the dn into another valid form that is logically identical,
// but (potentially) textually different
switch (randomIntBetween(0, 3)) {
case 0:
// do nothing
return dn;
case 1:
return dn.toUpperCase(Locale.ROOT);
case 2:
// Upper case just the attribute name for each RDN
return Arrays.stream(dn.split(",")).map(s -> {
final String[] arr = s.split("=");
arr[0] = arr[0].toUpperCase(Locale.ROOT);
return String.join("=", arr);
}).collect(Collectors.joining(","));
case 3:
return dn.replaceAll(",", ", ");
}
return dn;
}
}

View File

@ -78,9 +78,9 @@ import org.elasticsearch.xpack.core.ml.job.results.CategoryDefinition;
import org.elasticsearch.xpack.core.ml.job.results.Forecast;
import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats;
import org.elasticsearch.xpack.core.ml.job.results.Result;
import org.elasticsearch.xpack.core.persistent.PersistentTaskParams;
import org.elasticsearch.xpack.core.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.core.persistent.PersistentTasksNodeService;
import org.elasticsearch.persistent.PersistentTaskParams;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksNodeService;
import org.elasticsearch.xpack.core.security.SecurityField;
import org.elasticsearch.xpack.core.security.authc.TokenMetaData;