Remove InternalClient and InternalSecurityClient (elastic/x-pack-elasticsearch#3054)

This change removes the InternalClient and the InternalSecurityClient. They are replaced by use of
the ThreadContext and a transient value, `action.origin`, which indicates the component a
request came from. The security code has been updated to look for this value and ensure the
request is executed as the proper user. This approach comes from elastic/x-pack-elasticsearch#2808,
where @s1monw suggested it.
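
In brief, the mechanism works as follows: before executing an internal request, a component stashes the thread context and sets the `action.origin` transient; on the authorization side, the security code maps a non-null origin back to the right internal user. A hypothetical sketch of that check (the helper methods and the exact user mapping here are illustrative, not the code in this commit):

// Hypothetical sketch of the origin check on the security side; the
// executeAsUser/authenticateAndAuthorize helpers are illustrative only.
final String origin = threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME);
if (origin == null) {
    // a real user made this request: authenticate and authorize as usual
    authenticateAndAuthorize(action, request, listener);
} else if (ClientHelper.SECURITY_ORIGIN.equals(origin)) {
    // internal security requests run as the internal security user
    executeAsUser(XPackSecurityUser.INSTANCE, action, request, listener);
} else {
    // watcher, ml, monitoring, etc. run as the general internal x-pack user
    executeAsUser(XPackUser.INSTANCE, action, request, listener);
}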

While working on this, I came across the index template registries; rather than updating them to use
the new method, I replaced the ML one with the template upgrade framework so that the registry
could be removed. The watcher template registry is still needed, as its template must be
updated for rolling upgrades to work (see elastic/x-pack-elasticsearch#2950).
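
For context on the framework: each plugin can return a `UnaryOperator<Map<String, IndexTemplateMetaData>>` from `getIndexTemplateMetaDataUpgrader()`, and the upgrade service applies the composed operator to the cluster's templates, re-installing any that changed. A minimal upgrader, modeled on the Logstash change below (`my-template`, the resource path, and `MyPlugin` are placeholders):

// Minimal sketch of a template upgrader, modeled on the Logstash change
// in this commit; "my-template" and the resource path are placeholders.
public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
    return templates -> {
        // loads the JSON resource, substitutes the version pattern with the
        // current node version, and puts the parsed template into the map
        TemplateUtils.loadTemplateIntoMap("/my-template.json", templates, "my-template",
                Version.CURRENT.toString(), Pattern.quote("${my.template.version}"),
                Loggers.getLogger(MyPlugin.class));
        return templates;
    };
}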

Original commit: elastic/x-pack-elasticsearch@7dbf2f263e
Jay Modi authored on 2017-11-22 08:35:18 -07:00 (committed by GitHub)
parent c7a64667d4
commit 0a683a0e18
149 changed files with 2428 additions and 2863 deletions

View File

@@ -258,6 +258,7 @@ integTestCluster {
setting 'xpack.security.transport.ssl.enabled', 'true'
setting 'xpack.security.transport.ssl.keystore.path', nodeKeystore.name
setting 'xpack.security.transport.ssl.verification_mode', 'certificate'
setting 'xpack.security.audit.enabled', 'true'
keystoreSetting 'bootstrap.password', 'x-pack-test-password'
keystoreSetting 'xpack.security.transport.ssl.keystore.secure_password', 'keypass'
distribution = 'zip' // this is important since we use the reindex module in ML

View File

@@ -0,0 +1,101 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import java.util.function.BiConsumer;
import java.util.function.Supplier;
/**
* Utility class to help with the execution of requests made using a {@link Client} such that they
* have the origin as a transient and listeners have the appropriate context upon invocation
*/
public final class ClientHelper {
public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin";
public static final String SECURITY_ORIGIN = "security";
public static final String WATCHER_ORIGIN = "watcher";
public static final String ML_ORIGIN = "ml";
public static final String MONITORING_ORIGIN = "monitoring";
public static final String DEPRECATION_ORIGIN = "deprecation";
public static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks";
private ClientHelper() {}
/**
* Stashes the current context and sets the origin in the current context. The original context is returned as a stored context
*/
public static ThreadContext.StoredContext stashWithOrigin(ThreadContext threadContext, String origin) {
final ThreadContext.StoredContext storedContext = threadContext.stashContext();
threadContext.putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin);
return storedContext;
}
/**
* Returns a client that will always set the appropriate origin and ensure the proper context is restored by listeners
*/
public static Client clientWithOrigin(Client client, String origin) {
return new ClientWithOrigin(client, origin);
}
/**
* Executes a consumer after setting the origin and wrapping the listener so that the proper context is restored
*/
public static <Request extends ActionRequest, Response extends ActionResponse> void executeAsyncWithOrigin(
ThreadContext threadContext, String origin, Request request, ActionListener<Response> listener,
BiConsumer<Request, ActionListener<Response>> consumer) {
final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) {
consumer.accept(request, new ContextPreservingActionListener<>(supplier, listener));
}
}
/**
* Executes an asynchronous action using the provided client. The origin is set in the context and the listener
* is wrapped to ensure the proper context is restored
*/
public static <Request extends ActionRequest, Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void executeAsyncWithOrigin(
Client client, String origin, Action<Request, Response, RequestBuilder> action, Request request,
ActionListener<Response> listener) {
final ThreadContext threadContext = client.threadPool().getThreadContext();
final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) {
client.execute(action, request, new ContextPreservingActionListener<>(supplier, listener));
}
}
private static final class ClientWithOrigin extends FilterClient {
private final String origin;
private ClientWithOrigin(Client in, String origin) {
super(in);
this.origin = origin;
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
final Supplier<ThreadContext.StoredContext> supplier = in().threadPool().getThreadContext().newRestorableContext(false);
try (ThreadContext.StoredContext ignore = in().threadPool().getThreadContext().stashContext()) {
in().threadPool().getThreadContext().putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin);
super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener));
}
}
}
}
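
For illustration, typical call sites of this helper look like the following; the index names and requests are placeholders rather than code from this commit:

// Synchronous-style usage: stash the context, set the origin, and restore
// the original context when the try block closes.
try (ThreadContext.StoredContext ignore =
         ClientHelper.stashWithOrigin(client.threadPool().getThreadContext(), ClientHelper.WATCHER_ORIGIN)) {
    // requests executed here carry the watcher origin
    client.prepareSearch(".watches").get();
}

// Asynchronous usage: the helper sets the origin for the call and wraps the
// listener so it is invoked under the caller's original context.
ClientHelper.executeAsyncWithOrigin(client, ClientHelper.ML_ORIGIN, SearchAction.INSTANCE,
        new SearchRequest(".ml-state"), ActionListener.wrap(
                response -> { /* handle the response */ },
                e -> { /* handle the failure */ }));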

View File

@@ -80,7 +80,6 @@ import org.elasticsearch.xpack.persistent.StartPersistentTaskAction;
import org.elasticsearch.xpack.persistent.UpdatePersistentTaskStatusAction;
import org.elasticsearch.xpack.rest.action.RestXPackInfoAction;
import org.elasticsearch.xpack.rest.action.RestXPackUsageAction;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.Security;
import org.elasticsearch.xpack.security.authc.AuthenticationService;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
@@ -249,9 +248,6 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
List<Object> components = new ArrayList<>();
components.add(sslService);
final InternalClient internalClient = new InternalClient(settings, threadPool, client);
components.add(internalClient);
LicenseService licenseService = new LicenseService(settings, clusterService, getClock(),
env, resourceWatcherService, licenseState);
components.add(licenseService);
@@ -263,20 +259,18 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
} catch (final Exception e) {
throw new IllegalStateException("security initialization failed", e);
}
components.addAll(monitoring.createComponents(internalClient, threadPool, clusterService, licenseService, sslService));
components.addAll(monitoring.createComponents(client, threadPool, clusterService, licenseService, sslService));
components.addAll(watcher.createComponents(getClock(), scriptService, internalClient, licenseState, threadPool, clusterService,
components.addAll(watcher.createComponents(getClock(), scriptService, client, licenseState, threadPool, clusterService,
xContentRegistry, sslService));
PersistentTasksService persistentTasksService = new PersistentTasksService(settings, clusterService, threadPool, internalClient);
PersistentTasksService persistentTasksService = new PersistentTasksService(settings, clusterService, threadPool, client);
components.addAll(machineLearning.createComponents(internalClient, clusterService, threadPool, xContentRegistry,
components.addAll(machineLearning.createComponents(client, clusterService, threadPool, xContentRegistry,
persistentTasksService));
List<PersistentTasksExecutor<?>> tasksExecutors = new ArrayList<>();
tasksExecutors.addAll(machineLearning.createPersistentTasksExecutors(clusterService));
components.addAll(logstash.createComponents(internalClient, clusterService));
components.addAll(upgrade.createComponents(client, clusterService, threadPool, resourceWatcherService,
scriptService, xContentRegistry));
@@ -451,6 +445,8 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I
return templates -> {
templates = watcher.getIndexTemplateMetaDataUpgrader().apply(templates);
templates = security.getIndexTemplateMetaDataUpgrader().apply(templates);
templates = logstash.getIndexTemplateMetaDataUpgrader().apply(templates);
templates = machineLearning.getIndexTemplateMetaDataUpgrader().apply(templates);
return templates;
};
}
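
The chained application above is plain function composition; an equivalent form (not what the commit uses) builds the operator once with `Function.andThen`:

// Equivalent to the chained application above; andThen yields a Function,
// which the method reference adapts back to a UnaryOperator.
Function<Map<String, IndexTemplateMetaData>, Map<String, IndexTemplateMetaData>> combined =
        watcher.getIndexTemplateMetaDataUpgrader()
                .andThen(security.getIndexTemplateMetaDataUpgrader())
                .andThen(logstash.getIndexTemplateMetaDataUpgrader())
                .andThen(machineLearning.getIndexTemplateMetaDataUpgrader());
return combined::apply;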

View File

@@ -6,7 +6,6 @@
package org.elasticsearch.xpack.deprecation;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
@@ -23,6 +22,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBui
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -34,17 +34,14 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.node.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.deprecation.DeprecationIssue.Level;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Arrays;
@@ -52,11 +49,12 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.Function;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.xpack.ClientHelper.DEPRECATION_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.deprecation.DeprecationChecks.CLUSTER_SETTINGS_CHECKS;
import static org.elasticsearch.xpack.deprecation.DeprecationChecks.INDEX_SETTINGS_CHECKS;
import static org.elasticsearch.xpack.deprecation.DeprecationChecks.NODE_SETTINGS_CHECKS;
@@ -281,14 +279,14 @@ public class DeprecationInfoAction extends Action<DeprecationInfoAction.Request,
public static class TransportAction extends TransportMasterNodeReadAction<Request, Response> {
private final XPackLicenseState licenseState;
private final InternalClient client;
private final NodeClient client;
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
XPackLicenseState licenseState, InternalClient client) {
XPackLicenseState licenseState, NodeClient client) {
super(settings, DeprecationInfoAction.NAME, transportService, clusterService, threadPool, actionFilters,
Request::new, indexNameExpressionResolver);
this.licenseState = licenseState;
@@ -318,22 +316,26 @@ public class DeprecationInfoAction extends Action<DeprecationInfoAction.Request,
NodesInfoRequest nodesInfoRequest = new NodesInfoRequest("_local").settings(true).plugins(true);
NodesStatsRequest nodesStatsRequest = new NodesStatsRequest("_local").fs(true);
client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(
final ThreadContext threadContext = client.threadPool().getThreadContext();
executeAsyncWithOrigin(threadContext, DEPRECATION_ORIGIN, nodesInfoRequest, ActionListener.<NodesInfoResponse>wrap(
nodesInfoResponse -> {
if (nodesInfoResponse.hasFailures()) {
throw nodesInfoResponse.failures().get(0);
}
client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.wrap(
executeAsyncWithOrigin(threadContext, DEPRECATION_ORIGIN, nodesStatsRequest,
ActionListener.<NodesStatsResponse>wrap(
nodesStatsResponse -> {
if (nodesStatsResponse.hasFailures()) {
throw nodesStatsResponse.failures().get(0);
}
listener.onResponse(Response.from(nodesInfoResponse.getNodes(),
nodesStatsResponse.getNodes(), state, indexNameExpressionResolver,
request.indices(), request.indicesOptions(), CLUSTER_SETTINGS_CHECKS,
NODE_SETTINGS_CHECKS, INDEX_SETTINGS_CHECKS));
}, listener::onFailure));
},listener::onFailure));
request.indices(), request.indicesOptions(),
CLUSTER_SETTINGS_CHECKS, NODE_SETTINGS_CHECKS,
INDEX_SETTINGS_CHECKS));
}, listener::onFailure),
client.admin().cluster()::nodesStats);
}, listener::onFailure), client.admin().cluster()::nodesInfo);
} else {
listener.onFailure(LicenseUtils.newComplianceException(XPackPlugin.DEPRECATION));
}
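
The interleaved old and new lines above are easier to follow with the overload in mind: this is the BiConsumer variant of executeAsyncWithOrigin, which stashes the origin and then invokes the passed method reference with a context-preserving listener. The new code has this shape:

// Schematic form of the nested calls above (abbreviated).
executeAsyncWithOrigin(threadContext, DEPRECATION_ORIGIN, nodesInfoRequest,
        ActionListener.<NodesInfoResponse>wrap(nodesInfoResponse -> {
            // ... then issue the stats request the same way
            executeAsyncWithOrigin(threadContext, DEPRECATION_ORIGIN, nodesStatsRequest,
                    ActionListener.<NodesStatsResponse>wrap(nodesStatsResponse -> {
                        // build the final response from both node responses
                    }, listener::onFailure),
                    client.admin().cluster()::nodesStats);
        }, listener::onFailure),
        client.admin().cluster()::nodesInfo);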

View File

@@ -5,18 +5,22 @@
*/
package org.elasticsearch.xpack.logstash;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.template.TemplateUtils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.UnaryOperator;
import java.util.regex.Pattern;
/**
* This class activates/deactivates the logstash modules depending on whether we're running a node client or a transport client
@@ -24,13 +28,14 @@ import java.util.List;
public class Logstash implements ActionPlugin {
public static final String NAME = "logstash";
private static final String LOGSTASH_TEMPLATE_NAME = "logstash-index-template";
private static final String TEMPLATE_VERSION_PATTERN =
Pattern.quote("${logstash.template.version}");
private final Settings settings;
private final boolean enabled;
private final boolean transportClientMode;
public Logstash(Settings settings) {
this.settings = settings;
this.enabled = XPackSettings.LOGSTASH_ENABLED.get(settings);
this.transportClientMode = XPackPlugin.transportClientMode(settings);
}
@@ -51,11 +56,11 @@ public class Logstash implements ActionPlugin {
return modules;
}
public Collection<Object> createComponents(InternalClient client, ClusterService clusterService) {
if (this.transportClientMode || enabled == false) {
return Collections.emptyList();
}
return Collections.singletonList(new LogstashTemplateRegistry(settings, clusterService, client));
public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
return templates -> {
TemplateUtils.loadTemplateIntoMap("/" + LOGSTASH_TEMPLATE_NAME + ".json", templates, LOGSTASH_TEMPLATE_NAME,
Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN, Loggers.getLogger(Logstash.class));
return templates;
};
}
}
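
For reference, TemplateUtils.loadTemplateIntoMap used above roughly amounts to: load the classpath resource, substitute the version pattern, parse the JSON into an IndexTemplateMetaData, and store it under the given name. A rough sketch under those assumptions (the real implementation lives in org.elasticsearch.xpack.template.TemplateUtils and may differ in detail):

// Rough sketch of loadTemplateIntoMap's behavior; the parsing details are
// assumptions, not the exact x-pack implementation.
public static void loadTemplateIntoMap(String resource, Map<String, IndexTemplateMetaData> map,
                                       String templateName, String version, String versionPattern,
                                       Logger logger) {
    // loadTemplate reads the resource and replaces the version pattern,
    // e.g. ${logstash.template.version}, with the supplied version
    String source = loadTemplate(resource, version, versionPattern);
    try (XContentParser parser = XContentType.JSON.xContent()
            .createParser(NamedXContentRegistry.EMPTY, source)) {
        map.put(templateName, IndexTemplateMetaData.Builder.fromXContent(parser, templateName));
    } catch (IOException e) {
        logger.error("error parsing template [{}] from [{}]", templateName, resource);
        throw new IllegalStateException("unable to load template [" + templateName + "]", e);
    }
}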

View File

@@ -1,112 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.logstash;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.xpack.template.TemplateUtils;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;
/**
* Registry for the Logstash index template and settings
* This class is based on xpack.security.SecurityLifecycleService.
*/
public class LogstashTemplateRegistry extends AbstractComponent implements ClusterStateListener {
public static final String LOGSTASH_INDEX_NAME = ".logstash";
public static final String LOGSTASH_TEMPLATE_NAME = "logstash-index-template";
public static final String TEMPLATE_VERSION_PATTERN =
Pattern.quote("${logstash.template.version}");
private static final String LOGSTASH_VERSION_PROPERTY = "logstash-version";
private final Client client;
private final AtomicBoolean templateIsUpToDate = new AtomicBoolean(false);
// only put the template if this is not already in progress
private final AtomicBoolean templateCreationPending = new AtomicBoolean(false);
public LogstashTemplateRegistry(Settings settings, ClusterService clusterService, Client client) {
super(settings);
this.client = client;
clusterService.addListener(this);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.localNodeMaster()) {
// wait until the gateway has recovered from disk,
// otherwise we may think the index templates are missing while they actually do exist
if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false) {
addTemplatesIfMissing(event.state());
}
}
}
public boolean isTemplateUpToDate() {
return templateIsUpToDate.get();
}
public boolean isTemplateCreationPending() {
return templateCreationPending.get();
}
private void addTemplatesIfMissing(ClusterState state) {
this.templateIsUpToDate.set(TemplateUtils.checkTemplateExistsAndIsUpToDate(LOGSTASH_TEMPLATE_NAME,
LOGSTASH_VERSION_PROPERTY, state, logger));
// only put the template if it's not up to date and not already in progress
if (isTemplateUpToDate() == false && templateCreationPending.compareAndSet(false, true)) {
putTemplate();
}
}
private void putTemplate() {
logger.debug("putting the template [{}]", LOGSTASH_TEMPLATE_NAME);
String template = TemplateUtils.loadTemplate("/" + LOGSTASH_TEMPLATE_NAME + ".json",
Version.CURRENT.toString(), TEMPLATE_VERSION_PATTERN);
PutIndexTemplateRequest putTemplateRequest = client.admin().indices()
.preparePutTemplate(LOGSTASH_TEMPLATE_NAME)
.setSource(
new BytesArray(template.getBytes(StandardCharsets.UTF_8)),
XContentType.JSON)
.request();
client.admin().indices().putTemplate(putTemplateRequest, ActionListener.wrap(r -> {
templateCreationPending.set(false);
if (r.isAcknowledged()) {
templateIsUpToDate.set(true);
logger.debug("successfully updated [{}] index template", LOGSTASH_TEMPLATE_NAME);
} else {
logger.error("put template [{}] was not acknowledged", LOGSTASH_TEMPLATE_NAME);
}
}, e -> {
templateCreationPending.set(false);
logger.warn(new ParameterizedMessage(
"failed to put template [{}]", LOGSTASH_TEMPLATE_NAME), e);
}));
}
}

View File

@@ -5,18 +5,26 @@
*/
package org.elasticsearch.xpack.ml;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
@@ -27,7 +35,9 @@ import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.monitor.os.OsProbe;
import org.elasticsearch.monitor.os.OsStats;
@@ -84,6 +94,8 @@ import org.elasticsearch.xpack.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.ml.job.JobManager;
import org.elasticsearch.xpack.ml.job.UpdateJobProcessNotifier;
import org.elasticsearch.xpack.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.ml.job.persistence.AnomalyDetectorsIndex;
import org.elasticsearch.xpack.ml.job.persistence.ElasticsearchMappings;
import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister;
@@ -99,6 +111,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.MultiplyingNormalizerPr
import org.elasticsearch.xpack.ml.job.process.normalizer.NativeNormalizerProcessFactory;
import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerFactory;
import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerProcessFactory;
import org.elasticsearch.xpack.ml.notifications.AuditMessage;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.elasticsearch.xpack.ml.rest.RestDeleteExpiredDataAction;
import org.elasticsearch.xpack.ml.rest.datafeeds.RestDeleteDatafeedAction;
@@ -138,7 +151,7 @@ import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.persistent.PersistentTasksExecutor;
import org.elasticsearch.xpack.persistent.PersistentTasksNodeService;
import org.elasticsearch.xpack.persistent.PersistentTasksService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.template.TemplateUtils;
import java.io.IOException;
import java.math.BigInteger;
@@ -147,7 +160,9 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import java.util.function.UnaryOperator;
import static java.util.Collections.emptyList;
import static org.elasticsearch.xpack.XPackPlugin.MACHINE_LEARNING;
@@ -175,6 +190,8 @@ public class MachineLearning implements ActionPlugin {
public static final TimeValue STATE_PERSIST_RESTORE_TIMEOUT = TimeValue.timeValueMinutes(30);
private static final Logger logger = Loggers.getLogger(XPackPlugin.class);
private final Settings settings;
private final Environment env;
private final XPackLicenseState licenseState;
@@ -310,19 +327,19 @@
);
}
public Collection<Object> createComponents(InternalClient internalClient, ClusterService clusterService, ThreadPool threadPool,
public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
NamedXContentRegistry xContentRegistry, PersistentTasksService persistentTasksService) {
if (enabled == false || transportClientMode || tribeNode || tribeNodeClient) {
return emptyList();
}
Auditor auditor = new Auditor(internalClient, clusterService);
JobProvider jobProvider = new JobProvider(internalClient, settings);
UpdateJobProcessNotifier notifier = new UpdateJobProcessNotifier(settings, internalClient, clusterService, threadPool);
JobManager jobManager = new JobManager(settings, jobProvider, clusterService, auditor, internalClient, notifier);
Auditor auditor = new Auditor(client, clusterService);
JobProvider jobProvider = new JobProvider(client, settings);
UpdateJobProcessNotifier notifier = new UpdateJobProcessNotifier(settings, client, clusterService, threadPool);
JobManager jobManager = new JobManager(settings, jobProvider, clusterService, auditor, client, notifier);
JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(settings, internalClient);
JobResultsPersister jobResultsPersister = new JobResultsPersister(settings, internalClient);
JobDataCountsPersister jobDataCountsPersister = new JobDataCountsPersister(settings, client);
JobResultsPersister jobResultsPersister = new JobResultsPersister(settings, client);
AutodetectProcessFactory autodetectProcessFactory;
NormalizerProcessFactory normalizerProcessFactory;
@@ -333,7 +350,7 @@
// This will only happen when path.home is not set, which is disallowed in production
throw new ElasticsearchException("Failed to create native process controller for Machine Learning");
}
autodetectProcessFactory = new NativeAutodetectProcessFactory(env, settings, nativeController, internalClient);
autodetectProcessFactory = new NativeAutodetectProcessFactory(env, settings, nativeController, client);
normalizerProcessFactory = new NativeNormalizerProcessFactory(env, settings, nativeController);
} catch (IOException e) {
// This also should not happen in production, as the MachineLearningFeatureSet should have
@@ -349,12 +366,12 @@
}
NormalizerFactory normalizerFactory = new NormalizerFactory(normalizerProcessFactory,
threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME));
AutodetectProcessManager autodetectProcessManager = new AutodetectProcessManager(settings, internalClient, threadPool,
AutodetectProcessManager autodetectProcessManager = new AutodetectProcessManager(settings, client, threadPool,
jobManager, jobProvider, jobResultsPersister, jobDataCountsPersister, autodetectProcessFactory,
normalizerFactory, xContentRegistry, auditor);
this.autodetectProcessManager.set(autodetectProcessManager);
DatafeedJobBuilder datafeedJobBuilder = new DatafeedJobBuilder(internalClient, jobProvider, auditor, System::currentTimeMillis);
DatafeedManager datafeedManager = new DatafeedManager(threadPool, internalClient, clusterService, datafeedJobBuilder,
DatafeedJobBuilder datafeedJobBuilder = new DatafeedJobBuilder(client, jobProvider, auditor, System::currentTimeMillis);
DatafeedManager datafeedManager = new DatafeedManager(threadPool, client, clusterService, datafeedJobBuilder,
System::currentTimeMillis, auditor, persistentTasksService);
this.datafeedManager.set(datafeedManager);
MlLifeCycleService mlLifeCycleService = new MlLifeCycleService(env, clusterService, datafeedManager, autodetectProcessManager);
@@ -366,8 +383,7 @@
jobProvider,
jobManager,
autodetectProcessManager,
new MachineLearningTemplateRegistry(settings, clusterService, internalClient, threadPool),
new MlInitializationService(settings, threadPool, clusterService, internalClient),
new MlInitializationService(settings, threadPool, clusterService, client),
jobDataCountsPersister,
datafeedManager,
auditor,
@@ -516,6 +532,100 @@
return Arrays.asList(autoDetect, renormalizer, datafeed);
}
public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
return templates -> {
final TimeValue delayedNodeTimeOutSetting;
// Whether we are using the native process is a good way to detect whether we are in dev / test mode:
if (MachineLearning.AUTODETECT_PROCESS.get(settings)) {
delayedNodeTimeOutSetting = UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(settings);
} else {
delayedNodeTimeOutSetting = TimeValue.timeValueNanos(0);
}
try (XContentBuilder auditMapping = ElasticsearchMappings.auditMessageMapping()) {
IndexTemplateMetaData notificationMessageTemplate = IndexTemplateMetaData.builder(Auditor.NOTIFICATIONS_INDEX)
.putMapping(AuditMessage.TYPE.getPreferredName(), auditMapping.string())
.patterns(Collections.singletonList(Auditor.NOTIFICATIONS_INDEX))
.version(Version.CURRENT.id)
.settings(Settings.builder()
// Our indexes are small and one shard puts the
// least possible burden on Elasticsearch
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting))
.build();
templates.put(Auditor.NOTIFICATIONS_INDEX, notificationMessageTemplate);
} catch (IOException e) {
logger.warn("Error loading the template for the notification message index", e);
}
try (XContentBuilder docMapping = MlMetaIndex.docMapping()) {
IndexTemplateMetaData metaTemplate = IndexTemplateMetaData.builder(MlMetaIndex.INDEX_NAME)
.patterns(Collections.singletonList(MlMetaIndex.INDEX_NAME))
.settings(Settings.builder()
// Our indexes are small and one shard puts the
// least possible burden on Elasticsearch
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting))
.version(Version.CURRENT.id)
.putMapping(MlMetaIndex.TYPE, docMapping.string())
.build();
templates.put(MlMetaIndex.INDEX_NAME, metaTemplate);
} catch (IOException e) {
logger.warn("Error loading the template for the " + MlMetaIndex.INDEX_NAME + " index", e);
}
try (XContentBuilder stateMapping = ElasticsearchMappings.stateMapping()) {
IndexTemplateMetaData stateTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobStateIndexName())
.patterns(Collections.singletonList(AnomalyDetectorsIndex.jobStateIndexName()))
// TODO review these settings
.settings(Settings.builder()
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)
// Sacrifice durability for performance: in the event of power
// failure we can lose the last 5 seconds of changes, but it's
// much faster
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), "async"))
.putMapping(ElasticsearchMappings.DOC_TYPE, stateMapping.string())
.version(Version.CURRENT.id)
.build();
templates.put(AnomalyDetectorsIndex.jobStateIndexName(), stateTemplate);
} catch (IOException e) {
logger.error("Error loading the template for the " + AnomalyDetectorsIndex.jobStateIndexName() + " index", e);
}
try (XContentBuilder docMapping = ElasticsearchMappings.docMapping()) {
IndexTemplateMetaData jobResultsTemplate = IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobResultsIndexPrefix())
.patterns(Collections.singletonList(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"))
.settings(Settings.builder()
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)
// Sacrifice durability for performance: in the event of power
// failure we can lose the last 5 seconds of changes, but it's
// much faster
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), "async")
// set the default all search field
.put(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), ElasticsearchMappings.ALL_FIELD_VALUES))
.putMapping(ElasticsearchMappings.DOC_TYPE, docMapping.string())
.version(Version.CURRENT.id)
.build();
templates.put(AnomalyDetectorsIndex.jobResultsIndexPrefix(), jobResultsTemplate);
} catch (IOException e) {
logger.error("Error loading the template for the " + AnomalyDetectorsIndex.jobResultsIndexPrefix() + " indices", e);
}
return templates;
};
}
public static boolean allTemplatesInstalled(ClusterState clusterState) {
boolean allPresent = true;
List<String> templateNames = Arrays.asList(Auditor.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME,
AnomalyDetectorsIndex.jobStateIndexName(), AnomalyDetectorsIndex.jobResultsIndexPrefix());
for (String templateName : templateNames) {
allPresent = allPresent && TemplateUtils.checkTemplateExistsAndVersionIsGTECurrentVersion(templateName, clusterState);
}
return allPresent;
}
/**
* Find the memory size (in bytes) of the machine this node is running on.
* Takes container limits (as used by Docker for example) into account.

View File

@@ -1,315 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.job.persistence.AnomalyDetectorsIndex;
import org.elasticsearch.xpack.ml.job.persistence.ElasticsearchMappings;
import org.elasticsearch.xpack.ml.notifications.AuditMessage;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import java.io.IOException;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
/**
* Registry for the ML index templates and settings
*/
public class MachineLearningTemplateRegistry extends AbstractComponent implements ClusterStateListener {
private static final String ASYNC = "async";
private final Client client;
private final ThreadPool threadPool;
public static final String [] TEMPLATE_NAMES = new String [] {Auditor.NOTIFICATIONS_INDEX, MlMetaIndex.INDEX_NAME,
AnomalyDetectorsIndex.jobStateIndexName(), AnomalyDetectorsIndex.jobResultsIndexPrefix()};
final AtomicBoolean putMlNotificationsIndexTemplateCheck = new AtomicBoolean(false);
final AtomicBoolean putMlMetaIndexTemplateCheck = new AtomicBoolean(false);
final AtomicBoolean putStateIndexTemplateCheck = new AtomicBoolean(false);
final AtomicBoolean putResultsIndexTemplateCheck = new AtomicBoolean(false);
// Allows us in test mode to disable the delay of shard allocation, so that in tests we don't have to wait
// for at least a minute for shards to get allocated.
private final TimeValue delayedNodeTimeOutSetting;
public MachineLearningTemplateRegistry(Settings settings, ClusterService clusterService, Client client,
ThreadPool threadPool) {
super(settings);
this.client = client;
this.threadPool = threadPool;
// Whether we are using the native process is a good way to detect whether we are in dev / test mode:
if (MachineLearning.AUTODETECT_PROCESS.get(settings)) {
delayedNodeTimeOutSetting = UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(settings);
} else {
delayedNodeTimeOutSetting = TimeValue.timeValueNanos(0);
}
clusterService.addListener(this);
}
@Override
public void clusterChanged(ClusterChangedEvent event) {
if (event.localNodeMaster()) {
// wait until the gateway has recovered from disk,
// otherwise we may think the index templates are missing while they actually do exist
if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false) {
addTemplatesIfMissing(event.state());
}
}
}
/**
* Puts the registered index templates if missing to the
* cluster waiting until the templates have been updated.
*/
public void addTemplatesIfMissing(ClusterState state) {
MetaData metaData = state.metaData();
addMlNotificationsIndexTemplate(metaData);
addMlMetaIndexTemplate(metaData);
addStateIndexTemplate(metaData);
addResultsIndexTemplate(metaData);
}
static boolean templateIsPresentAndUpToDate(String templateName, MetaData metaData) {
IndexTemplateMetaData templateMetaData = metaData.templates().get(templateName);
if (templateMetaData == null) {
return false;
}
return templateMetaData.version() != null && templateMetaData.version() >= Version.CURRENT.id;
}
private void addMlNotificationsIndexTemplate(MetaData metaData) {
if (putMlNotificationsIndexTemplateCheck.compareAndSet(false, true)) {
if (templateIsPresentAndUpToDate(Auditor.NOTIFICATIONS_INDEX, metaData) == false) {
threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> {
putNotificationMessageIndexTemplate((result, error) -> {
putMlNotificationsIndexTemplateCheck.set(false);
if (result) {
logger.info("successfully created {} index template", Auditor.NOTIFICATIONS_INDEX);
} else {
logger.error(
new ParameterizedMessage("not able to create {} index template", Auditor.NOTIFICATIONS_INDEX), error);
}
});
});
} else {
putMlNotificationsIndexTemplateCheck.set(false);
}
}
}
private void addMlMetaIndexTemplate(MetaData metaData) {
if (putMlMetaIndexTemplateCheck.compareAndSet(false, true)) {
if (templateIsPresentAndUpToDate(MlMetaIndex.INDEX_NAME, metaData) == false) {
threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> {
putMetaIndexTemplate((result, error) -> {
putMlMetaIndexTemplateCheck.set(false);
if (result) {
logger.info("successfully created {} index template", MlMetaIndex.INDEX_NAME);
} else {
logger.error(new ParameterizedMessage("not able to create {} index template", MlMetaIndex.INDEX_NAME), error);
}
});
});
} else {
putMlMetaIndexTemplateCheck.set(false);
}
}
}
private void addStateIndexTemplate(MetaData metaData) {
String stateIndexName = AnomalyDetectorsIndex.jobStateIndexName();
if (putStateIndexTemplateCheck.compareAndSet(false, true)) {
if (templateIsPresentAndUpToDate(stateIndexName, metaData) == false) {
threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> {
putJobStateIndexTemplate((result, error) -> {
putStateIndexTemplateCheck.set(false);
if (result) {
logger.info("successfully created {} index template", stateIndexName);
} else {
logger.error("not able to create " + stateIndexName + " index template", error);
}
});
});
} else {
putStateIndexTemplateCheck.set(false);
}
}
}
private void addResultsIndexTemplate(MetaData metaData) {
if (putResultsIndexTemplateCheck.compareAndSet(false, true)) {
if (templateIsPresentAndUpToDate(AnomalyDetectorsIndex.jobResultsIndexPrefix(), metaData) == false) {
threadPool.executor(ThreadPool.Names.GENERIC).execute(() -> {
putJobResultsIndexTemplate((result, error) -> {
putResultsIndexTemplateCheck.set(false);
if (result) {
logger.info("successfully created {} index template", AnomalyDetectorsIndex.jobResultsIndexPrefix());
} else {
logger.error(
new ParameterizedMessage("not able to create {} index template",
AnomalyDetectorsIndex.jobResultsIndexPrefix()), error);
}
});
});
} else {
putResultsIndexTemplateCheck.set(false);
}
}
}
/**
* Index template for notifications
*/
void putNotificationMessageIndexTemplate(BiConsumer<Boolean, Exception> listener) {
try (XContentBuilder auditMapping = ElasticsearchMappings.auditMessageMapping()) {
PutIndexTemplateRequest templateRequest = new PutIndexTemplateRequest(Auditor.NOTIFICATIONS_INDEX);
templateRequest.patterns(Collections.singletonList(Auditor.NOTIFICATIONS_INDEX));
templateRequest.settings(mlNotificationIndexSettings());
templateRequest.mapping(AuditMessage.TYPE.getPreferredName(), auditMapping);
templateRequest.version(Version.CURRENT.id);
client.admin().indices().putTemplate(templateRequest,
ActionListener.wrap(r -> listener.accept(true, null), e -> listener.accept(false, e)));
} catch (IOException e) {
logger.warn("Error putting the template for the notification message index", e);
listener.accept(false,
new ElasticsearchException("Error creating the template mappings for the notification message indices", e));
}
}
/**
* Index template for meta data
*/
void putMetaIndexTemplate(BiConsumer<Boolean, Exception> listener) {
PutIndexTemplateRequest templateRequest = new PutIndexTemplateRequest(MlMetaIndex.INDEX_NAME);
templateRequest.patterns(Collections.singletonList(MlMetaIndex.INDEX_NAME));
templateRequest.settings(mlNotificationIndexSettings());
templateRequest.version(Version.CURRENT.id);
try (XContentBuilder docMapping = MlMetaIndex.docMapping()) {
templateRequest.mapping(MlMetaIndex.TYPE, docMapping);
} catch (IOException e) {
String msg = "Error creating template mappings for the " + MlMetaIndex.INDEX_NAME + " index";
logger.error(msg, e);
listener.accept(false, new ElasticsearchException(msg, e));
}
client.admin().indices().putTemplate(templateRequest,
ActionListener.wrap(r -> listener.accept(true, null), e -> listener.accept(false, e)));
}
void putJobStateIndexTemplate(BiConsumer<Boolean, Exception> listener) {
try (XContentBuilder stateMapping = ElasticsearchMappings.stateMapping()) {
PutIndexTemplateRequest templateRequest = new PutIndexTemplateRequest(AnomalyDetectorsIndex.jobStateIndexName());
templateRequest.patterns(Collections.singletonList(AnomalyDetectorsIndex.jobStateIndexName()));
templateRequest.settings(mlStateIndexSettings());
templateRequest.mapping(ElasticsearchMappings.DOC_TYPE, stateMapping);
templateRequest.version(Version.CURRENT.id);
client.admin().indices().putTemplate(templateRequest,
ActionListener.wrap(r -> listener.accept(true, null), e -> listener.accept(false, e)));
} catch (IOException e) {
logger.error("Error creating template mappings for the " + AnomalyDetectorsIndex.jobStateIndexName() + " index", e);
listener.accept(false, new ElasticsearchException("Error creating template mappings for the " +
AnomalyDetectorsIndex.jobStateIndexName() + " indices", e));
}
}
void putJobResultsIndexTemplate(BiConsumer<Boolean, Exception> listener) {
try (XContentBuilder docMapping = ElasticsearchMappings.docMapping()) {
PutIndexTemplateRequest templateRequest = new PutIndexTemplateRequest(AnomalyDetectorsIndex.jobResultsIndexPrefix());
templateRequest.patterns(Collections.singletonList(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"));
templateRequest.settings(mlResultsIndexSettings());
templateRequest.mapping(ElasticsearchMappings.DOC_TYPE, docMapping);
templateRequest.version(Version.CURRENT.id);
client.admin().indices().putTemplate(templateRequest,
ActionListener.wrap(r -> listener.accept(true, null), e -> listener.accept(false, e)));
} catch (IOException e) {
logger.error("Error creating template mappings for the " + AnomalyDetectorsIndex.jobResultsIndexPrefix() + " indices", e);
listener.accept(false, new ElasticsearchException("Error creating template mappings for the "
+ AnomalyDetectorsIndex.jobResultsIndexPrefix() + " index", e));
}
}
/**
* Build the index settings that we want to apply to results indexes.
*
* @return Builder initialised with the desired setting for the ML results indices.
*/
Settings.Builder mlResultsIndexSettings() {
return Settings.builder()
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)
// Sacrifice durability for performance: in the event of power
// failure we can lose the last 5 seconds of changes, but it's
// much faster
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), ASYNC)
// set the default all search field
.put(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), ElasticsearchMappings.ALL_FIELD_VALUES);
}
/**
* Settings for the notification messages index
*
* @return Builder initialised with the desired setting for the ML index.
*/
Settings.Builder mlNotificationIndexSettings() {
return Settings.builder()
// Our indexes are small and one shard puts the
// least possible burden on Elasticsearch
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting);
}
/**
* Settings for the state index
*
* @return Builder initialised with the desired setting for the ML index.
*/
Settings.Builder mlStateIndexSettings() {
// TODO review these settings
return Settings.builder()
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)
// Sacrifice durability for performance: in the event of power
// failure we can lose the last 5 seconds of changes, but it's
// much faster
.put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), ASYNC);
}
public static boolean allTemplatesInstalled(MetaData metaData) {
boolean allPresent = true;
for (String templateName : TEMPLATE_NAMES) {
allPresent = allPresent && templateIsPresentAndUpToDate(templateName, metaData);
}
return allPresent;
}
}

View File

@@ -14,6 +14,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.action.DeleteExpiredDataAction;
import org.joda.time.DateTime;
@@ -24,6 +25,10 @@ import java.util.Random;
import java.util.concurrent.ScheduledFuture;
import java.util.function.Supplier;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
/**
* A service that runs once a day and triggers maintenance tasks.
*/
@@ -107,7 +112,8 @@ public class MlDailyMaintenanceService implements Releasable {
private void triggerTasks() {
LOGGER.info("triggering scheduled [ML] maintenance tasks");
client.execute(DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request(), ActionListener.wrap(
executeAsyncWithOrigin(client, ML_ORIGIN, DeleteExpiredDataAction.INSTANCE, new DeleteExpiredDataAction.Request(),
ActionListener.wrap(
response -> LOGGER.info("Successfully completed [ML] maintenance tasks"),
e -> LOGGER.error("An error occurred during maintenance tasks execution", e)));
scheduleNext();

View File

@@ -18,6 +18,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.BaseTasksRequest;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -53,7 +54,6 @@ import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.persistent.PersistentTasksService;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.ArrayList;
@@ -66,6 +66,9 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class CloseJobAction extends Action<CloseJobAction.Request, CloseJobAction.Response, CloseJobAction.RequestBuilder> {
public static final CloseJobAction INSTANCE = new CloseJobAction();
@@ -305,7 +308,7 @@ public class CloseJobAction extends Action<CloseJobAction.Request, CloseJobActio
public static class TransportAction extends TransportTasksAction<OpenJobAction.JobTask, Request, Response, Response> {
private final InternalClient client;
private final Client client;
private final ClusterService clusterService;
private final Auditor auditor;
private final PersistentTasksService persistentTasksService;
@@ -313,7 +316,7 @@ public class CloseJobAction extends Action<CloseJobAction.Request, CloseJobActio
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService, InternalClient client,
ClusterService clusterService, Client client,
Auditor auditor, PersistentTasksService persistentTasksService) {
// We fork in innerTaskOperation(...), so we can use ThreadPool.Names.SAME here:
super(settings, CloseJobAction.NAME, threadPool, clusterService, transportService, actionFilters,
@@ -548,18 +551,8 @@ public class CloseJobAction extends Action<CloseJobAction.Request, CloseJobActio
public void onResponse(Boolean result) {
FinalizeJobExecutionAction.Request finalizeRequest = new FinalizeJobExecutionAction.Request(
waitForCloseRequest.jobsToFinalize.toArray(new String[0]));
client.execute(FinalizeJobExecutionAction.INSTANCE, finalizeRequest,
new ActionListener<FinalizeJobExecutionAction.Response>() {
@Override
public void onResponse(FinalizeJobExecutionAction.Response r) {
listener.onResponse(response);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
executeAsyncWithOrigin(client, ML_ORIGIN, FinalizeJobExecutionAction.INSTANCE, finalizeRequest,
ActionListener.wrap(r -> listener.onResponse(response), listener::onFailure));
}
@Override

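The listener change above is mechanical: ActionListener.wrap(onResponse, onFailure) builds the same listener the anonymous class did. Side by side:

// These two listeners are equivalent; wrap() is the compact form.
ActionListener<FinalizeJobExecutionAction.Response> verbose =
        new ActionListener<FinalizeJobExecutionAction.Response>() {
            @Override
            public void onResponse(FinalizeJobExecutionAction.Response r) {
                listener.onResponse(response);
            }
            @Override
            public void onFailure(Exception e) {
                listener.onFailure(e);
            }
        };
ActionListener<FinalizeJobExecutionAction.Response> compact =
        ActionListener.wrap(r -> listener.onResponse(response), listener::onFailure);
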
View File

@@ -15,6 +15,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
@@ -28,7 +29,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.threadpool.ThreadPool;
@@ -38,11 +38,13 @@ import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.persistent.PersistentTasksService;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class DeleteDatafeedAction extends Action<DeleteDatafeedAction.Request, DeleteDatafeedAction.Response,
DeleteDatafeedAction.RequestBuilder> {
@@ -163,16 +165,16 @@ public class DeleteDatafeedAction extends Action<DeleteDatafeedAction.Request, D
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private InternalClient client;
private Client client;
private PersistentTasksService persistentTasksService;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
InternalClient internalClient, PersistentTasksService persistentTasksService) {
Client client, PersistentTasksService persistentTasksService) {
super(settings, DeleteDatafeedAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.client = internalClient;
this.client = client;
this.persistentTasksService = persistentTasksService;
}
@@ -207,7 +209,7 @@ public class DeleteDatafeedAction extends Action<DeleteDatafeedAction.Request, D
);
IsolateDatafeedAction.Request isolateDatafeedRequest = new IsolateDatafeedAction.Request(request.getDatafeedId());
client.execute(IsolateDatafeedAction.INSTANCE, isolateDatafeedRequest, isolateDatafeedHandler);
executeAsyncWithOrigin(client, ML_ORIGIN, IsolateDatafeedAction.INSTANCE, isolateDatafeedRequest, isolateDatafeedHandler);
}
private void removeDatafeedTask(Request request, ClusterState state, ActionListener<Boolean> listener) {

View File

@@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
@@ -32,7 +33,6 @@ import org.elasticsearch.xpack.ml.job.retention.ExpiredResultsRemover;
import org.elasticsearch.xpack.ml.job.retention.MlDataRemover;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.elasticsearch.xpack.ml.utils.VolatileCursorIterator;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Arrays;
@@ -125,13 +125,13 @@ public class DeleteExpiredDataAction extends Action<DeleteExpiredDataAction.Requ
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final InternalClient client;
private final Client client;
private final ClusterService clusterService;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
InternalClient client, ClusterService clusterService) {
Client client, ClusterService clusterService) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.client = client;
this.clusterService = clusterService;

View File

@@ -9,6 +9,7 @@ import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
@@ -18,6 +19,7 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -36,7 +38,6 @@ import org.elasticsearch.xpack.ml.job.config.Detector;
import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.ArrayList;
@ -44,6 +45,9 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class DeleteFilterAction extends Action<DeleteFilterAction.Request, DeleteFilterAction.Response, DeleteFilterAction.RequestBuilder> {
@ -144,14 +148,14 @@ public class DeleteFilterAction extends Action<DeleteFilterAction.Request, Delet
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final InternalClient client;
private final Client client;
private final ClusterService clusterService;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService, InternalClient client) {
ClusterService clusterService, Client client) {
super(settings, NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, Request::new);
this.clusterService = clusterService;
@ -184,7 +188,8 @@ public class DeleteFilterAction extends Action<DeleteFilterAction.Request, Delet
BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
bulkRequestBuilder.add(deleteRequest);
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
bulkRequestBuilder.execute(new ActionListener<BulkResponse>() {
executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(),
new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse bulkResponse) {
if (bulkResponse.getItems()[0].status() == RestStatus.NOT_FOUND) {

View File

@ -16,6 +16,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
@ -44,12 +45,14 @@ import org.elasticsearch.xpack.ml.job.persistence.JobStorageDeletionTask;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.persistent.PersistentTasksService;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Objects;
import java.util.concurrent.TimeoutException;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class DeleteJobAction extends Action<DeleteJobAction.Request, DeleteJobAction.Response, DeleteJobAction.RequestBuilder> {
public static final DeleteJobAction INSTANCE = new DeleteJobAction();
@ -172,17 +175,17 @@ public class DeleteJobAction extends Action<DeleteJobAction.Request, DeleteJobAc
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final InternalClient internalClient;
private final Client client;
private final JobManager jobManager;
private final PersistentTasksService persistentTasksService;
@Inject
public TransportAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobManager jobManager, PersistentTasksService persistentTasksService, InternalClient internalClient) {
JobManager jobManager, PersistentTasksService persistentTasksService, Client client) {
super(settings, DeleteJobAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, Request::new);
this.internalClient = internalClient;
this.client = client;
this.jobManager = jobManager;
this.persistentTasksService = persistentTasksService;
}
@ -293,7 +296,7 @@ public class DeleteJobAction extends Action<DeleteJobAction.Request, DeleteJobAc
private void killProcess(String jobId, ActionListener<KillProcessAction.Response> listener) {
KillProcessAction.Request killRequest = new KillProcessAction.Request(jobId);
internalClient.execute(KillProcessAction.INSTANCE, killRequest, listener);
executeAsyncWithOrigin(client, ML_ORIGIN, KillProcessAction.INSTANCE, killRequest, listener);
}
private void removePersistentTask(String jobId, ClusterState currentState,

View File

@ -15,6 +15,7 @@ import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
@ -32,7 +33,6 @@ import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Collections;
@ -130,7 +130,7 @@ public class DeleteModelSnapshotAction extends Action<DeleteModelSnapshotAction.
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final InternalClient client;
private final Client client;
private final JobProvider jobProvider;
private final ClusterService clusterService;
private final Auditor auditor;
@ -138,7 +138,7 @@ public class DeleteModelSnapshotAction extends Action<DeleteModelSnapshotAction.
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
JobProvider jobProvider, ClusterService clusterService, InternalClient client, Auditor auditor) {
JobProvider jobProvider, ClusterService clusterService, Client client, Auditor auditor) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.client = client;
this.jobProvider = jobProvider;

View File

@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
@ -67,6 +68,9 @@ import java.util.Objects;
import java.util.Set;
import java.util.function.LongSupplier;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* <p>
* This action returns summarized bucket results over multiple jobs.
@ -458,7 +462,8 @@ public class GetOverallBucketsAction
maxBucketSpanMillis, jobsContext.indices);
searchRequest.source().aggregation(AggregationBuilders.min(EARLIEST_TIME).field(Result.TIMESTAMP.getPreferredName()));
searchRequest.source().aggregation(AggregationBuilders.max(LATEST_TIME).field(Result.TIMESTAMP.getPreferredName()));
client.search(searchRequest, ActionListener.wrap(searchResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest,
ActionListener.<SearchResponse>wrap(searchResponse -> {
long totalHits = searchResponse.getHits().getTotalHits();
if (totalHits > 0) {
Aggregations aggregations = searchResponse.getAggregations();
@ -471,7 +476,8 @@ public class GetOverallBucketsAction
} else {
listener.onResponse(null);
}
}, listener::onFailure));
}, listener::onFailure),
client::search);
}
private static class JobsContext {
@ -540,16 +546,19 @@ public class GetOverallBucketsAction
listener.onResponse(overallBucketsProcessor.finish());
return;
}
client.search(nextSearch(), ActionListener.wrap(searchResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, nextSearch(),
ActionListener.<SearchResponse>wrap(searchResponse -> {
Histogram histogram = searchResponse.getAggregations().get(Result.TIMESTAMP.getPreferredName());
overallBucketsProcessor.process(overallBucketsProvider.computeOverallBuckets(histogram));
if (overallBucketsProcessor.size() > MAX_RESULT_COUNT) {
listener.onFailure(ExceptionsHelper.badRequestException("Unable to return more than [{}] results; please use " +
listener.onFailure(
ExceptionsHelper.badRequestException("Unable to return more than [{}] results; please use " +
"parameters [{}] and [{}] to limit the time range", MAX_RESULT_COUNT, Request.START, Request.END));
return;
}
searchAndComputeOverallBuckets(listener);
}, listener::onFailure));
}, listener::onFailure),
client::search);
}
SearchRequest nextSearch() {
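
Editor's note: where the call goes through a method reference such as client::search, client::multiSearch, or client.admin().indices()::aliases, the hunks use an overload that takes the ThreadContext and a BiConsumer instead of an Action. A plausible sketch under the same assumptions as the previous one:

public static <Request extends ActionRequest, Response extends ActionResponse>
void executeAsyncWithOrigin(ThreadContext threadContext, String origin, Request request,
                            ActionListener<Response> listener,
                            BiConsumer<Request, ActionListener<Response>> consumer) {
    final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
    try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) {
        // e.g. consumer = client::search, so this runs client.search(request, wrappedListener)
        consumer.accept(request, new ContextPreservingActionListener<>(supplier, listener));
    }
}

This shape lets existing call sites of the form client.search(searchRequest, listener) be converted with minimal churn, as the repeated hunks in JobProvider below show.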

View File

@ -21,6 +21,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -72,7 +73,6 @@ import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.Persiste
import org.elasticsearch.xpack.persistent.PersistentTasksExecutor;
import org.elasticsearch.xpack.persistent.PersistentTasksService;
import org.elasticsearch.xpack.persistent.PersistentTasksService.WaitForPersistentTaskStatusListener;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.ArrayList;
@ -85,6 +85,8 @@ import java.util.Objects;
import java.util.Set;
import java.util.function.Predicate;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager.MAX_OPEN_JOBS_PER_NODE;
public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.Response, OpenJobAction.RequestBuilder> {
@ -387,12 +389,12 @@ public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.R
private final XPackLicenseState licenseState;
private final PersistentTasksService persistentTasksService;
private final InternalClient client;
private final Client client;
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool, XPackLicenseState licenseState,
ClusterService clusterService, PersistentTasksService persistentTasksService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, InternalClient client) {
IndexNameExpressionResolver indexNameExpressionResolver, Client client) {
super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new);
this.licenseState = licenseState;
this.persistentTasksService = persistentTasksService;
@ -507,8 +509,8 @@ public class OpenJobAction extends Action<OpenJobAction.Request, OpenJobAction.R
PutMappingRequest putMappingRequest = new PutMappingRequest(indicesThatRequireAnUpdate);
putMappingRequest.type(ElasticsearchMappings.DOC_TYPE);
putMappingRequest.source(mapping);
client.execute(PutMappingAction.INSTANCE, putMappingRequest, ActionListener.wrap(
response -> {
executeAsyncWithOrigin(client, ML_ORIGIN, PutMappingAction.INSTANCE, putMappingRequest,
ActionListener.wrap(response -> {
if (response.isAcknowledged()) {
listener.onResponse(true);
} else {

View File

@ -28,7 +28,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

View File

@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
@ -18,6 +19,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Strings;
@ -36,12 +38,14 @@ import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.job.config.MlFilter;
import org.elasticsearch.xpack.ml.job.messages.Messages;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Collections;
import java.util.Objects;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class PutFilterAction extends Action<PutFilterAction.Request, PutFilterAction.Response, PutFilterAction.RequestBuilder> {
@ -160,13 +164,13 @@ public class PutFilterAction extends Action<PutFilterAction.Request, PutFilterAc
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final InternalClient client;
private final Client client;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
InternalClient client) {
Client client) {
super(settings, NAME, threadPool, transportService, actionFilters,
indexNameExpressionResolver, Request::new);
this.client = client;
@ -186,7 +190,8 @@ public class PutFilterAction extends Action<PutFilterAction.Request, PutFilterAc
bulkRequestBuilder.add(indexRequest);
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
bulkRequestBuilder.execute(new ActionListener<BulkResponse>() {
executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(),
new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse indexResponse) {
listener.onResponse(new Response());
@ -194,7 +199,8 @@ public class PutFilterAction extends Action<PutFilterAction.Request, PutFilterAc
@Override
public void onFailure(Exception e) {
listener.onFailure(new ResourceNotFoundException("Could not create filter with ID [" + filter.getId() + "]", e));
listener.onFailure(
new ResourceNotFoundException("Could not create filter with ID [" + filter.getId() + "]", e));
}
});
}

View File

@ -14,6 +14,7 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -43,7 +44,6 @@ import org.elasticsearch.xpack.ml.job.persistence.JobDataDeleter;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Date;
@ -252,7 +252,7 @@ extends Action<RevertModelSnapshotAction.Request, RevertModelSnapshotAction.Resp
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final InternalClient client;
private final Client client;
private final JobManager jobManager;
private final JobProvider jobProvider;
private final JobDataCountsPersister jobDataCountsPersister;
@ -260,7 +260,7 @@ extends Action<RevertModelSnapshotAction.Request, RevertModelSnapshotAction.Resp
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, JobManager jobManager, JobProvider jobProvider,
ClusterService clusterService, InternalClient client, JobDataCountsPersister jobDataCountsPersister) {
ClusterService clusterService, Client client, JobDataCountsPersister jobDataCountsPersister) {
super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new);
this.client = client;
this.jobManager = jobManager;

View File

@ -18,6 +18,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -65,13 +66,15 @@ import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.Persiste
import org.elasticsearch.xpack.persistent.PersistentTasksExecutor;
import org.elasticsearch.xpack.persistent.PersistentTasksService;
import org.elasticsearch.xpack.persistent.PersistentTasksService.WaitForPersistentTaskStatusListener;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Objects;
import java.util.function.LongSupplier;
import java.util.function.Predicate;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.clientWithOrigin;
public class StartDatafeedAction
extends Action<StartDatafeedAction.Request, StartDatafeedAction.Response, StartDatafeedAction.RequestBuilder> {
@ -422,7 +425,7 @@ public class StartDatafeedAction
// The start datafeed api is a low throughput api, so the fact that we redirect to elected master node shouldn't be an issue.
public static class TransportAction extends TransportMasterNodeAction<Request, Response> {
private final InternalClient client;
private final Client client;
private final XPackLicenseState licenseState;
private final PersistentTasksService persistentTasksService;
@ -430,11 +433,11 @@ public class StartDatafeedAction
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool, ClusterService clusterService,
XPackLicenseState licenseState, PersistentTasksService persistentTasksService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
InternalClient client) {
Client client) {
super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new);
this.licenseState = licenseState;
this.persistentTasksService = persistentTasksService;
this.client = client;
this.client = clientWithOrigin(client, ML_ORIGIN);
}
@Override
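
Editor's note: StartDatafeedAction (and DatafeedJobBuilder further below) takes a different route — the injected Client is wrapped once in the constructor via clientWithOrigin, and every call made through it then carries the origin automatically. A sketch of such a wrapper as a FilterClient; the class name and the exact override signature here are assumptions based on the Client API of this era:

private static final class ClientWithOrigin extends FilterClient {

    private final String origin;

    ClientWithOrigin(Client in, String origin) {
        super(in);
        this.origin = origin;
    }

    @Override
    protected <Request extends ActionRequest, Response extends ActionResponse,
            RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
    void doExecute(Action<Request, Response, RequestBuilder> action, Request request,
                   ActionListener<Response> listener) {
        final ThreadContext threadContext = threadPool().getThreadContext();
        final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
        // stash, stamp the origin, delegate, and restore the caller's context for the listener
        try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) {
            super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener));
        }
    }
}

Wrapping once suits components that only ever act on behalf of a single subsystem, at the cost of hiding the stashing from readers of the individual call sites.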

View File

@ -12,12 +12,14 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.ParseField;
@ -43,12 +45,14 @@ import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot;
import org.elasticsearch.xpack.ml.job.results.Result;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.security.InternalClient;
import java.io.IOException;
import java.util.Objects;
import java.util.function.Consumer;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class UpdateModelSnapshotAction extends Action<UpdateModelSnapshotAction.Request,
UpdateModelSnapshotAction.Response, UpdateModelSnapshotAction.RequestBuilder> {
@ -266,11 +270,11 @@ public class UpdateModelSnapshotAction extends Action<UpdateModelSnapshotAction.
public static class TransportAction extends HandledTransportAction<Request, Response> {
private final JobProvider jobProvider;
private final InternalClient client;
private final Client client;
@Inject
public TransportAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, JobProvider jobProvider, InternalClient client) {
IndexNameExpressionResolver indexNameExpressionResolver, JobProvider jobProvider, Client client) {
super(settings, NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, Request::new);
this.jobProvider = jobProvider;
this.client = client;
@ -318,7 +322,8 @@ public class UpdateModelSnapshotAction extends Action<UpdateModelSnapshotAction.
BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
bulkRequestBuilder.add(indexRequest);
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
bulkRequestBuilder.execute(new ActionListener<BulkResponse>() {
executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(),
new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse indexResponse) {
handler.accept(true);

View File

@ -11,6 +11,7 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.rest.RestStatus;
@ -31,6 +32,9 @@ import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
class DatafeedJob {
private static final Logger LOGGER = Loggers.getLogger(DatafeedJob.class);
@ -263,9 +267,11 @@ class DatafeedJob {
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
Streams.copy(inputStream, outputStream);
request.setContent(new BytesArray(outputStream.toByteArray()), xContentType);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
PostDataAction.Response response = client.execute(PostDataAction.INSTANCE, request).actionGet();
return response.getDataCounts();
}
}
private boolean isConflictException(Exception e) {
return e instanceof ElasticsearchStatusException
@ -284,7 +290,9 @@ class DatafeedJob {
private FlushJobAction.Response flushJob(FlushJobAction.Request flushRequest) {
try {
LOGGER.trace("[" + jobId + "] Sending flush request");
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
return client.execute(FlushJobAction.INSTANCE, flushRequest).actionGet();
}
} catch (Exception e) {
LOGGER.debug("[" + jobId + "] error while flushing job", e);

View File

@ -26,6 +26,9 @@ import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Supplier;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.clientWithOrigin;
public class DatafeedJobBuilder {
private final Client client;
@ -34,7 +37,7 @@ public class DatafeedJobBuilder {
private final Supplier<Long> currentTimeSupplier;
public DatafeedJobBuilder(Client client, JobProvider jobProvider, Auditor auditor, Supplier<Long> currentTimeSupplier) {
this.client = Objects.requireNonNull(client);
this.client = clientWithOrigin(client, ML_ORIGIN);
this.jobProvider = Objects.requireNonNull(jobProvider);
this.auditor = Objects.requireNonNull(auditor);
this.currentTimeSupplier = Objects.requireNonNull(currentTimeSupplier);

View File

@ -43,6 +43,8 @@ import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Consumer;
import java.util.function.Supplier;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.persistent.PersistentTasksService.WaitForPersistentTaskStatusListener;
public class DatafeedManager extends AbstractComponent {
@ -415,7 +417,8 @@ public class DatafeedManager extends AbstractComponent {
for the close job api call.
*/
closeJobRequest.setLocal(true);
client.execute(CloseJobAction.INSTANCE, closeJobRequest, new ActionListener<CloseJobAction.Response>() {
executeAsyncWithOrigin(client, ML_ORIGIN, CloseJobAction.INSTANCE, closeJobRequest,
new ActionListener<CloseJobAction.Response>() {
@Override
public void onResponse(CloseJobAction.Response response) {

View File

@ -6,7 +6,6 @@
package org.elasticsearch.xpack.ml.datafeed.extractor.chunked;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory;

View File

@ -11,6 +11,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;

View File

@ -21,6 +21,8 @@ import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import java.util.concurrent.LinkedBlockingQueue;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ml.action.UpdateProcessAction.Request;
import static org.elasticsearch.xpack.ml.action.UpdateProcessAction.Response;
@ -98,7 +100,7 @@ public class UpdateJobProcessNotifier extends AbstractComponent implements Local
void executeRemoteJob(JobUpdate update) {
Request request = new Request(update.getJobId(), update.getModelPlotConfig(), update.getDetectorUpdates());
client.execute(UpdateProcessAction.INSTANCE, request,
executeAsyncWithOrigin(client, ML_ORIGIN, UpdateProcessAction.INSTANCE, request,
new ActionListener<Response>() {
@Override
public void onResponse(Response response) {

View File

@ -8,6 +8,8 @@ package org.elasticsearch.xpack.ml.job.persistence;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.component.AbstractComponent;
@ -19,6 +21,8 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.state.DataCounts;
import java.io.IOException;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* Update a job's dataCounts
@ -47,9 +51,11 @@ public class JobDataCountsPersister extends AbstractComponent {
*/
public void persistDataCounts(String jobId, DataCounts counts, ActionListener<Boolean> listener) {
try (XContentBuilder content = serialiseCounts(counts)) {
client.prepareIndex(AnomalyDetectorsIndex.resultsWriteAlias(jobId), ElasticsearchMappings.DOC_TYPE,
final IndexRequest request = client.prepareIndex(AnomalyDetectorsIndex.resultsWriteAlias(jobId), ElasticsearchMappings.DOC_TYPE,
DataCounts.documentId(jobId))
.setSource(content).execute(new ActionListener<IndexResponse>() {
.setSource(content)
.request();
executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, request, new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
listener.onResponse(true);

View File

@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.job.persistence;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
@ -15,10 +16,10 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.query.ConstantScoreQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.DeleteByQueryAction;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.search.builder.SearchSourceBuilder;
@ -29,6 +30,10 @@ import org.elasticsearch.xpack.ml.job.results.Result;
import java.util.List;
import java.util.Objects;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
public class JobDataDeleter {
private static final Logger LOGGER = Loggers.getLogger(JobDataDeleter.class);
@ -105,8 +110,7 @@ public class JobDataDeleter {
bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
try {
// TODO: change docDeleteListener to listener in 7.0
bulkRequestBuilder.execute(docDeleteListener);
executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(), docDeleteListener);
} catch (Exception e) {
listener.onFailure(e);
}
@ -127,17 +131,8 @@ public class JobDataDeleter {
.filter(QueryBuilders.rangeQuery(Result.TIMESTAMP.getPreferredName()).gte(cutoffEpochMs));
deleteByQueryHolder.searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
deleteByQueryHolder.searchRequest.source(new SearchSourceBuilder().query(query));
client.execute(DeleteByQueryAction.INSTANCE, deleteByQueryHolder.dbqRequest, new ActionListener<BulkByScrollResponse>() {
@Override
public void onResponse(BulkByScrollResponse bulkByScrollResponse) {
listener.onResponse(true);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, deleteByQueryHolder.dbqRequest,
ActionListener.wrap(r -> listener.onResponse(true), listener::onFailure));
}
/**
@ -151,7 +146,7 @@ public class JobDataDeleter {
QueryBuilder qb = QueryBuilders.termQuery(Result.IS_INTERIM.getPreferredName(), true);
deleteByQueryHolder.searchRequest.source(new SearchSourceBuilder().query(new ConstantScoreQueryBuilder(qb)));
try {
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
client.execute(DeleteByQueryAction.INSTANCE, deleteByQueryHolder.dbqRequest).get();
} catch (Exception e) {
LOGGER.error("[" + jobId + "] An error occurred while deleting interim results", e);

View File

@ -12,7 +12,11 @@ import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.action.search.MultiSearchRequestBuilder;
import org.elasticsearch.action.search.MultiSearchResponse;
@ -32,6 +36,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@ -89,6 +94,10 @@ import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Supplier;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
public class JobProvider {
private static final Logger LOGGER = Loggers.getLogger(JobProvider.class);
@ -200,7 +209,8 @@ public class JobProvider {
}
};
msearch.execute(searchResponseActionListener);
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, msearch.request(), searchResponseActionListener,
client::multiSearch);
}
@ -214,15 +224,14 @@ public class JobProvider {
String writeAliasName = AnomalyDetectorsIndex.resultsWriteAlias(job.getId());
String indexName = job.getResultsIndexName();
final ActionListener<Boolean> createAliasListener = ActionListener.wrap(success ->
client.admin().indices().prepareAliases()
final ActionListener<Boolean> createAliasListener = ActionListener.wrap(success -> {
final IndicesAliasesRequest request = client.admin().indices().prepareAliases()
.addAlias(indexName, readAliasName, QueryBuilders.termQuery(Job.ID.getPreferredName(), job.getId()))
.addAlias(indexName, writeAliasName)
// we could return 'success && r.isAcknowledged()' instead of 'true', but that makes
// testing not possible as we can't create IndicesAliasesResponse instance or
// mock IndicesAliasesResponse#isAcknowledged()
.execute(ActionListener.wrap(r -> finalListener.onResponse(true), finalListener::onFailure)),
finalListener::onFailure);
.addAlias(indexName, writeAliasName).request();
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, request,
ActionListener.<IndicesAliasesResponse>wrap(r -> finalListener.onResponse(true), finalListener::onFailure),
client.admin().indices()::aliases);
}, finalListener::onFailure);
// Indices can be shared, so only create if it doesn't exist already. Saves us a roundtrip if
// already in the CS
@ -234,8 +243,8 @@ public class JobProvider {
try (XContentBuilder termFieldsMapping = ElasticsearchMappings.termFieldsMapping(ElasticsearchMappings.DOC_TYPE, termFields)) {
createIndexRequest.mapping(ElasticsearchMappings.DOC_TYPE, termFieldsMapping);
}
client.admin().indices().create(createIndexRequest,
ActionListener.wrap(
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, createIndexRequest,
ActionListener.<CreateIndexResponse>wrap(
r -> createAliasListener.onResponse(r.isAcknowledged()),
e -> {
// Possible that the index was created while the request was executing,
@ -248,7 +257,7 @@ public class JobProvider {
finalListener.onFailure(e);
}
}
));
), client.admin().indices()::create);
} else {
long fieldCountLimit = MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.get(settings);
if (violatedFieldCountLimit(indexName, termFields.size(), fieldCountLimit, state)) {
@ -297,9 +306,9 @@ public class JobProvider {
private void updateIndexMappingWithTermFields(String indexName, Collection<String> termFields, ActionListener<Boolean> listener) {
// Put the whole "doc" mapping, not just the term fields, otherwise we'll wipe the _meta section of the mapping
try (XContentBuilder termFieldsMapping = ElasticsearchMappings.docMapping(termFields)) {
client.admin().indices().preparePutMapping(indexName).setType(ElasticsearchMappings.DOC_TYPE)
.setSource(termFieldsMapping)
.execute(new ActionListener<PutMappingResponse>() {
final PutMappingRequest request = client.admin().indices().preparePutMapping(indexName).setType(ElasticsearchMappings.DOC_TYPE)
.setSource(termFieldsMapping).request();
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, request, new ActionListener<PutMappingResponse>() {
@Override
public void onResponse(PutMappingResponse putMappingResponse) {
listener.onResponse(putMappingResponse.isAcknowledged());
@ -309,7 +318,7 @@ public class JobProvider {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}, client.admin().indices()::putMapping);
} catch (IOException e) {
listener.onFailure(e);
}
@ -353,7 +362,8 @@ public class JobProvider {
msearch.add(createDocIdSearch(MlMetaIndex.INDEX_NAME, filterId));
}
msearch.execute(ActionListener.wrap(
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, msearch.request(),
ActionListener.<MultiSearchResponse>wrap(
response -> {
for (int i = 0; i < response.getResponses().length; i++) {
MultiSearchResponse.Item itemResponse = response.getResponses()[i];
@ -389,7 +399,7 @@ public class JobProvider {
consumer.accept(paramsBuilder.build());
},
errorHandler
));
), client::multiSearch);
}
private SearchRequestBuilder createDocIdSearch(String index, String id) {
@ -456,7 +466,8 @@ public class JobProvider {
searchRequest.source(query.build());
searchRequest.indicesOptions(addIgnoreUnavailable(SearchRequest.DEFAULT_INDICES_OPTIONS));
client.search(searchRequest, ActionListener.wrap(searchResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest,
ActionListener.<SearchResponse>wrap(searchResponse -> {
SearchHits hits = searchResponse.getHits();
List<Bucket> results = new ArrayList<>();
for (SearchHit hit : hits.getHits()) {
@ -482,7 +493,7 @@ public class JobProvider {
} else {
handler.accept(buckets);
}
}, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetBucketsAction.NAME))));
}, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetBucketsAction.NAME))), client::search);
}
private void expandBuckets(String jobId, BucketsQueryBuilder query, QueryPage<Bucket> buckets, Iterator<Bucket> bucketsToExpand,
@ -585,7 +596,8 @@ public class JobProvider {
throw new IllegalStateException("Both categoryId and pageParams are not specified");
}
searchRequest.source(sourceBuilder);
client.search(searchRequest, ActionListener.wrap(searchResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest,
ActionListener.<SearchResponse>wrap(searchResponse -> {
SearchHit[] hits = searchResponse.getHits().getHits();
List<CategoryDefinition> results = new ArrayList<>(hits.length);
for (SearchHit hit : hits) {
@ -600,7 +612,7 @@ public class JobProvider {
QueryPage<CategoryDefinition> result =
new QueryPage<>(results, searchResponse.getHits().getTotalHits(), CategoryDefinition.RESULTS_FIELD);
handler.accept(result);
}, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetCategoriesAction.NAME))));
}, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetCategoriesAction.NAME))), client::search);
}
/**
@ -618,7 +630,8 @@ public class JobProvider {
searchRequest.source(recordsQueryBuilder.build());
LOGGER.trace("ES API CALL: search all of records from index {} with query {}", indexName, searchSourceBuilder);
client.search(searchRequest, ActionListener.wrap(searchResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest,
ActionListener.<SearchResponse>wrap(searchResponse -> {
List<AnomalyRecord> results = new ArrayList<>();
for (SearchHit hit : searchResponse.getHits().getHits()) {
BytesReference source = hit.getSourceRef();
@ -631,7 +644,7 @@ public class JobProvider {
QueryPage<AnomalyRecord> queryPage =
new QueryPage<>(results, searchResponse.getHits().getTotalHits(), AnomalyRecord.RESULTS_FIELD);
handler.accept(queryPage);
}, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetRecordsAction.NAME))));
}, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetRecordsAction.NAME))), client::search);
}
/**
@ -664,7 +677,8 @@ public class JobProvider {
: new FieldSortBuilder(query.getSortField()).order(query.isSortDescending() ? SortOrder.DESC : SortOrder.ASC);
searchRequest.source(new SearchSourceBuilder().query(qb).from(query.getFrom()).size(query.getSize()).sort(sb));
client.search(searchRequest, ActionListener.wrap(response -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest,
ActionListener.<SearchResponse>wrap(response -> {
List<Influencer> influencers = new ArrayList<>();
for (SearchHit hit : response.getHits().getHits()) {
BytesReference source = hit.getSourceRef();
@ -674,9 +688,10 @@ public class JobProvider {
throw new ElasticsearchParseException("failed to parse influencer", e);
}
}
QueryPage<Influencer> result = new QueryPage<>(influencers, response.getHits().getTotalHits(), Influencer.RESULTS_FIELD);
QueryPage<Influencer> result =
new QueryPage<>(influencers, response.getHits().getTotalHits(), Influencer.RESULTS_FIELD);
handler.accept(result);
}, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetInfluencersAction.NAME))));
}, e -> errorHandler.accept(mapAuthFailure(e, jobId, GetInfluencersAction.NAME))), client::search);
}
/**
@ -780,7 +795,8 @@ public class JobProvider {
sourceBuilder.from(from);
sourceBuilder.size(size);
searchRequest.source(sourceBuilder);
client.search(searchRequest, ActionListener.wrap(searchResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest,
ActionListener.<SearchResponse>wrap(searchResponse -> {
List<ModelSnapshot> results = new ArrayList<>();
for (SearchHit hit : searchResponse.getHits().getHits()) {
results.add(ModelSnapshot.fromJson(hit.getSourceRef()));
@ -789,7 +805,7 @@ public class JobProvider {
QueryPage<ModelSnapshot> result =
new QueryPage<>(results, searchResponse.getHits().getTotalHits(), ModelSnapshot.RESULTS_FIELD);
handler.accept(result);
}, errorHandler));
}, errorHandler), client::search);
}
public QueryPage<ModelPlot> modelPlot(String jobId, int from, int size) {
@ -797,11 +813,13 @@ public class JobProvider {
String indexName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId);
LOGGER.trace("ES API CALL: search model plots from index {} from {} size {}", indexName, from, size);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
searchResponse = client.prepareSearch(indexName)
.setIndicesOptions(addIgnoreUnavailable(SearchRequest.DEFAULT_INDICES_OPTIONS))
.setQuery(new TermsQueryBuilder(Result.RESULT_TYPE.getPreferredName(), ModelPlot.RESULT_TYPE_VALUE))
.setFrom(from).setSize(size)
.get();
}
List<ModelPlot> results = new ArrayList<>();
@ -834,7 +852,8 @@ public class JobProvider {
private <U, T> void searchSingleResult(String jobId, String resultDescription, SearchRequestBuilder search,
BiFunction<XContentParser, U, T> objectParser, Consumer<Result<T>> handler,
Consumer<Exception> errorHandler, Supplier<T> notFoundSupplier) {
search.execute(ActionListener.wrap(
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, search.request(),
ActionListener.<SearchResponse>wrap(
response -> {
SearchHit[] hits = response.getHits().getHits();
if (hits.length == 0) {
@ -847,7 +866,7 @@ public class JobProvider {
+ hits.length + "] hits even though size was 1"));
}
}, errorHandler
));
), client::search);
}
private SearchRequestBuilder createLatestModelSizeStatsSearch(String indexName) {
@ -892,22 +911,24 @@ public class JobProvider {
.filter(QueryBuilders.rangeQuery(Result.TIMESTAMP.getPreferredName()).gte(searchFromTimeMs))
.filter(QueryBuilders.termQuery(Result.RESULT_TYPE.getPreferredName(), ModelSizeStats.RESULT_TYPE_VALUE)))
.addAggregation(AggregationBuilders.extendedStats("es").field(ModelSizeStats.MODEL_BYTES_FIELD.getPreferredName()));
search.execute(ActionListener.wrap(
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, search.request(),
ActionListener.<SearchResponse>wrap(
response -> {
List<Aggregation> aggregations = response.getAggregations().asList();
if (aggregations.size() == 1) {
ExtendedStats extendedStats = (ExtendedStats) aggregations.get(0);
long count = extendedStats.getCount();
if (count <= 0) {
// model size stats haven't changed in the last N buckets, so the latest (older) ones are established
// model size stats haven't changed in the last N buckets,
// so the latest (older) ones are established
handleLatestModelSizeStats(jobId, latestModelSizeStats, handler, errorHandler);
} else if (count == 1) {
// no need to do an extra search in the case of exactly one document being aggregated
handler.accept((long) extendedStats.getAvg());
} else {
double coefficientOfVaration = extendedStats.getStdDeviation() / extendedStats.getAvg();
LOGGER.trace("[{}] Coefficient of variation [{}] when calculating established memory use", jobId,
coefficientOfVaration);
LOGGER.trace("[{}] Coefficient of variation [{}] when calculating established memory use",
jobId, coefficientOfVaration);
// is there sufficient stability in the latest model size stats readings?
if (coefficientOfVaration <= ESTABLISHED_MEMORY_CV_THRESHOLD) {
// yes, so return the latest model size as established
@ -921,7 +942,7 @@ public class JobProvider {
handler.accept(0L);
}
}, errorHandler
));
), client::search);
} else {
LOGGER.trace("[{}] Insufficient history to calculate established memory use", jobId);
handler.accept(0L);

View File

@ -12,6 +12,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.ml.job.process.normalizer.BucketNormalizable;
@ -23,6 +24,8 @@ import java.io.IOException;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.ml.job.persistence.ElasticsearchMappings.DOC_TYPE;
@ -98,10 +101,12 @@ public class JobRenormalizedResultsPersister extends AbstractComponent {
}
logger.trace("[{}] ES API CALL: bulk request with {} actions", jobId, bulkRequest.numberOfActions());
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
BulkResponse addRecordsResponse = client.bulk(bulkRequest).actionGet();
if (addRecordsResponse.hasFailures()) {
logger.error("[{}] Bulk index of results has errors: {}", jobId, addRecordsResponse.buildFailureMessage());
}
}
bulkRequest = new BulkRequest();
}

View File

@ -20,6 +20,7 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSizeStats;
@ -40,6 +41,9 @@ import java.util.List;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.ml.job.persistence.ElasticsearchMappings.DOC_TYPE;
/**
@ -187,10 +191,12 @@ public class JobResultsPersister extends AbstractComponent {
}
logger.trace("[{}] ES API CALL: bulk request with {} actions", jobId, bulkRequest.numberOfActions());
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
BulkResponse addRecordsResponse = client.bulk(bulkRequest).actionGet();
if (addRecordsResponse.hasFailures()) {
logger.error("[{}] Bulk index of results has errors: {}", jobId, addRecordsResponse.buildFailureMessage());
}
}
bulkRequest = new BulkRequest();
}
@ -284,8 +290,10 @@ public class JobResultsPersister extends AbstractComponent {
logger.trace("[{}] ES API CALL: refresh index {}", jobId, indexName);
RefreshRequest refreshRequest = new RefreshRequest(indexName);
refreshRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
client.admin().indices().refresh(refreshRequest).actionGet();
}
}
/**
* Once the job state has been written calling this function makes it
@ -299,8 +307,10 @@ public class JobResultsPersister extends AbstractComponent {
logger.trace("[{}] ES API CALL: refresh index {}", jobId, indexName);
RefreshRequest refreshRequest = new RefreshRequest(indexName);
refreshRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
client.admin().indices().refresh(refreshRequest).actionGet();
}
}
private XContentBuilder toXContentBuilder(ToXContent obj) throws IOException {
XContentBuilder builder = jsonBuilder();
@ -337,7 +347,7 @@ public class JobResultsPersister extends AbstractComponent {
try (XContentBuilder content = toXContentBuilder(object)) {
IndexRequest indexRequest = new IndexRequest(indexName, DOC_TYPE, id).source(content).setRefreshPolicy(refreshPolicy);
client.index(indexRequest, listener);
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, indexRequest, listener, client::index);
} catch (IOException e) {
logger.error(new ParameterizedMessage("[{}] Error writing [{}]", jobId, (id == null) ? "auto-generated ID" : id), e);
IndexResponse.Builder notCreatedResponse = new IndexResponse.Builder();

View File

@ -8,7 +8,9 @@ package org.elasticsearch.xpack.ml.job.persistence;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchRequest;
@ -39,6 +41,9 @@ import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class JobStorageDeletionTask extends Task {
private final Logger logger;
@ -88,7 +93,7 @@ public class JobStorageDeletionTask extends Task {
request.setAbortOnVersionConflict(false);
request.setRefresh(true);
client.execute(DeleteByQueryAction.INSTANCE, request, dbqHandler);
executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, request, dbqHandler);
},
failureHandler);
@ -119,7 +124,7 @@ public class JobStorageDeletionTask extends Task {
request.setAbortOnVersionConflict(false);
request.setRefresh(true);
client.execute(DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(
executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(
response -> finishedHandler.onResponse(true),
e -> {
// It's not a problem for us if the index wasn't found - it's equivalent to document not found
@ -155,7 +160,7 @@ public class JobStorageDeletionTask extends Task {
request.setAbortOnVersionConflict(false);
request.setRefresh(true);
client.execute(DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(
executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, request, ActionListener.wrap(
response -> {
// If we successfully deleted a document try the next one; if not we're done
if (response.getDeleted() > 0) {
@ -183,13 +188,15 @@ public class JobStorageDeletionTask extends Task {
// first find the concrete indices associated with the aliases
GetAliasesRequest aliasesRequest = new GetAliasesRequest().aliases(readAliasName, writeAliasName)
.indicesOptions(IndicesOptions.lenientExpandOpen());
client.admin().indices().getAliases(aliasesRequest, ActionListener.wrap(
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, aliasesRequest,
ActionListener.<GetAliasesResponse>wrap(
getAliasesResponse -> {
Set<String> aliases = new HashSet<>();
getAliasesResponse.getAliases().valuesIt().forEachRemaining(
metaDataList -> metaDataList.forEach(metadata -> aliases.add(metadata.getAlias())));
if (aliases.isEmpty()) {
// don't error if the job's aliases have already been deleted - carry on and delete the rest of the job's data
// don't error if the job's aliases have already been deleted - carry on and delete the
// rest of the job's data
finishedHandler.onResponse(true);
return;
}
@ -200,10 +207,11 @@ public class JobStorageDeletionTask extends Task {
IndicesAliasesRequest.AliasActions.remove()
.aliases(aliases.toArray(new String[aliases.size()]))
.indices(indices.toArray(new String[indices.size()])));
client.admin().indices().aliases(removeRequest, ActionListener.wrap(
removeResponse -> finishedHandler.onResponse(true),
finishedHandler::onFailure));
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, removeRequest,
ActionListener.<IndicesAliasesResponse>wrap(removeResponse -> finishedHandler.onResponse(true),
finishedHandler::onFailure),
client.admin().indices()::aliases);
},
finishedHandler::onFailure));
finishedHandler::onFailure), client.admin().indices()::getAliases);
}
}

View File

@ -12,6 +12,7 @@ import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.CategorizerState;
import org.elasticsearch.xpack.ml.job.process.autodetect.state.ModelSnapshot;
@ -19,6 +20,9 @@ import java.io.IOException;
import java.io.OutputStream;
import java.util.Objects;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
/**
* A {@code StateStreamer} fetches the various state documents and
* writes them into a stream. It allows cancellation via its
@ -66,6 +70,7 @@ public class StateStreamer {
LOGGER.trace("ES API CALL: get ID {} from index {}", stateDocId, indexName);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
GetResponse stateResponse = client.prepareGet(indexName, ElasticsearchMappings.DOC_TYPE, stateDocId).get();
if (!stateResponse.isExists()) {
LOGGER.error("Expected {} documents for model state for {} snapshot {} but failed to find {}",
@ -74,6 +79,7 @@ public class StateStreamer {
}
writeStateToStream(stateResponse.getSourceAsBytesRef(), restoreStream);
}
}
// Secondly try to restore categorizer state. This must come after model state because that's
// the order the C++ process expects. There are no snapshots for this, so the IDs simply
@ -88,12 +94,14 @@ public class StateStreamer {
LOGGER.trace("ES API CALL: get ID {} from index {}", docId, indexName);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
GetResponse stateResponse = client.prepareGet(indexName, ElasticsearchMappings.DOC_TYPE, docId).get();
if (!stateResponse.isExists()) {
break;
}
writeStateToStream(stateResponse.getSourceAsBytesRef(), restoreStream);
}
}
}
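
Editor's note: for blocking calls like prepareGet(...).get(), this commit uses the try-with-resources form rather than a listener wrapper. A small usage sketch of the semantics, assuming only what stashWithOrigin is shown to do at the top of this commit:

import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.util.concurrent.ThreadContext;

import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;

class SyncOriginUsageSketch {
    GetResponse fetch(Client client, String index, String type, String id) {
        final ThreadContext threadContext = client.threadPool().getThreadContext();
        // inside the try block the thread context holds (effectively) only the action.origin
        // transient, so security resolves this request to the ML internal user, not the caller
        try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, ML_ORIGIN)) {
            return client.prepareGet(index, type, id).get();
        } // closing the StoredContext puts the caller's original headers and transients back
    }
}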

View File

@ -42,6 +42,9 @@ import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* A runnable class that reads the autodetect process output in the
* {@link #process(AutodetectProcess)} method and persists parsed
@ -299,7 +302,7 @@ public class AutoDetectResultProcessor {
return;
}
client.execute(UpdateJobAction.INSTANCE, updateRequest, new ActionListener<PutJobAction.Response>() {
executeAsyncWithOrigin(client, ML_ORIGIN, UpdateJobAction.INSTANCE, updateRequest, new ActionListener<PutJobAction.Response>() {
@Override
public void onResponse(PutJobAction.Response response) {
updateModelSnapshotIdSemaphore.release();
@ -309,7 +312,8 @@ public class AutoDetectResultProcessor {
@Override
public void onFailure(Exception e) {
updateModelSnapshotIdSemaphore.release();
LOGGER.error("[" + jobId + "] Failed to update job with new model snapshot id [" + modelSnapshot.getSnapshotId() + "]", e);
LOGGER.error("[" + jobId + "] Failed to update job with new model snapshot id [" +
modelSnapshot.getSnapshotId() + "]", e);
}
});
}

View File

@ -12,6 +12,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.CompositeBytesReference;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.ml.job.persistence.AnomalyDetectorsIndex;
import org.elasticsearch.xpack.ml.job.persistence.ElasticsearchMappings;
@ -21,6 +22,9 @@ import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
/**
* Reads the autodetect state and persists via a bulk request
*/
@ -91,9 +95,11 @@ public class StateProcessor extends AbstractComponent {
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(bytes, AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, XContentType.JSON);
if (bulkRequest.numberOfActions() > 0) {
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
client.bulk(bulkRequest).actionGet();
}
}
}
private static int findNextZeroByte(BytesReference bytesRef, int searchFrom, int splitFrom) {
for (int i = Math.max(searchFrom, splitFrom); i < bytesRef.length(); ++i) {

View File

@ -33,6 +33,9 @@ import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Objects;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* Removes all results that have expired the configured retention time
* of their respective job. A result is deleted if its timestamp is earlier
@ -62,7 +65,7 @@ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover {
LOGGER.debug("Removing results of job [{}] that have a timestamp before [{}]", job.getId(), cutoffEpochMs);
DeleteByQueryRequest request = createDBQRequest(job, cutoffEpochMs);
client.execute(DeleteByQueryAction.INSTANCE, request, new ActionListener<BulkByScrollResponse>() {
executeAsyncWithOrigin(client, ML_ORIGIN, DeleteByQueryAction.INSTANCE, request, new ActionListener<BulkByScrollResponse>() {
@Override
public void onResponse(BulkByScrollResponse bulkByScrollResponse) {
try {

View File

@ -21,6 +21,8 @@ import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class Auditor {
@ -51,7 +53,7 @@ public class Auditor {
IndexRequest indexRequest = new IndexRequest(NOTIFICATIONS_INDEX, type);
indexRequest.source(toXContentBuilder(toXContent));
indexRequest.timeout(TimeValue.timeValueSeconds(5));
client.index(indexRequest, new ActionListener<IndexResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, indexRequest, new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
LOGGER.trace("Successfully persisted {}", type);
@ -61,7 +63,7 @@ public class Auditor {
public void onFailure(Exception e) {
LOGGER.debug(new ParameterizedMessage("Error writing {}", new Object[]{type}, e));
}
});
}, client::index);
}
private XContentBuilder toXContentBuilder(ToXContent toXContent) {

View File

@ -7,6 +7,8 @@ package org.elasticsearch.xpack.monitoring;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
@ -43,7 +45,6 @@ import org.elasticsearch.xpack.monitoring.exporter.Exporters;
import org.elasticsearch.xpack.monitoring.exporter.http.HttpExporter;
import org.elasticsearch.xpack.monitoring.exporter.local.LocalExporter;
import org.elasticsearch.xpack.monitoring.rest.action.RestMonitoringBulkAction;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.ssl.SSLService;
import java.util.ArrayList;
@ -137,7 +138,7 @@ public class Monitoring implements ActionPlugin {
return modules;
}
public Collection<Object> createComponents(InternalClient client, ThreadPool threadPool, ClusterService clusterService,
public Collection<Object> createComponents(Client client, ThreadPool threadPool, ClusterService clusterService,
LicenseService licenseService, SSLService sslService) {
if (enabled == false || tribeNode) {
return Collections.emptyList();

View File

@ -5,11 +5,13 @@
*/
package org.elasticsearch.xpack.monitoring.collector.ml;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.xpack.XPackClient;
import org.elasticsearch.xpack.XPackSettings;
@ -17,11 +19,13 @@ import org.elasticsearch.xpack.ml.action.GetJobsStatsAction;
import org.elasticsearch.xpack.ml.client.MachineLearningClient;
import org.elasticsearch.xpack.monitoring.collector.Collector;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.security.InternalClient;
import java.util.List;
import java.util.stream.Collectors;
import static org.elasticsearch.xpack.ClientHelper.MONITORING_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
/**
* Collector for Machine Learning Job Stats.
* <p>
@ -37,18 +41,19 @@ public class JobStatsCollector extends Collector {
*/
public static final Setting<TimeValue> JOB_STATS_TIMEOUT = collectionTimeoutSetting("ml.job.stats.timeout");
private final ThreadContext threadContext;
private final MachineLearningClient client;
public JobStatsCollector(final Settings settings, final ClusterService clusterService,
final XPackLicenseState licenseState, final InternalClient client) {
this(settings, clusterService, licenseState, new XPackClient(client).machineLearning());
final XPackLicenseState licenseState, final Client client) {
this(settings, clusterService, licenseState, new XPackClient(client).machineLearning(), client.threadPool().getThreadContext());
}
JobStatsCollector(final Settings settings, final ClusterService clusterService,
final XPackLicenseState licenseState, final MachineLearningClient client) {
final XPackLicenseState licenseState, final MachineLearningClient client, final ThreadContext threadContext) {
super(settings, JobStatsMonitoringDoc.TYPE, clusterService, JOB_STATS_TIMEOUT, licenseState);
this.client = client;
this.threadContext = threadContext;
}
@Override
@ -62,6 +67,7 @@ public class JobStatsCollector extends Collector {
@Override
protected List<MonitoringDoc> doCollect(final MonitoringDoc.Node node, final long interval) throws Exception {
// fetch details about all jobs
try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, MONITORING_ORIGIN)) {
final GetJobsStatsAction.Response jobs =
client.getJobsStats(new GetJobsStatsAction.Request(MetaData.ALL))
.actionGet(getCollectionTimeout());
@ -73,5 +79,6 @@ public class JobStatsCollector extends Collector {
.map(jobStats -> new JobStatsMonitoringDoc(clusterUuid, timestamp, interval, node, jobStats))
.collect(Collectors.toList());
}
}
}

View File

@ -9,7 +9,9 @@ import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentHelper;
@ -18,12 +20,14 @@ import org.elasticsearch.xpack.monitoring.exporter.ExportBulk;
import org.elasticsearch.xpack.monitoring.exporter.ExportException;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.security.InternalClient;
import org.joda.time.format.DateTimeFormatter;
import java.util.Arrays;
import java.util.Collection;
import static org.elasticsearch.xpack.ClientHelper.MONITORING_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* LocalBulk exports monitoring data in the local cluster using bulk requests. Its usage is not thread safe since the
* {@link LocalBulk#add(Collection)}, {@link LocalBulk#flush(org.elasticsearch.action.ActionListener)} and
@ -32,14 +36,14 @@ import java.util.Collection;
public class LocalBulk extends ExportBulk {
private final Logger logger;
private final InternalClient client;
private final Client client;
private final DateTimeFormatter formatter;
private final boolean usePipeline;
private BulkRequestBuilder requestBuilder;
LocalBulk(String name, Logger logger, InternalClient client, DateTimeFormatter dateTimeFormatter, boolean usePipeline) {
LocalBulk(String name, Logger logger, Client client, DateTimeFormatter dateTimeFormatter, boolean usePipeline) {
super(name, client.threadPool().getThreadContext());
this.logger = logger;
this.client = client;
@ -101,13 +105,15 @@ public class LocalBulk extends ExportBulk {
} else {
try {
logger.trace("exporter [{}] - exporting {} documents", name, requestBuilder.numberOfActions());
requestBuilder.execute(ActionListener.wrap(bulkResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), MONITORING_ORIGIN, requestBuilder.request(),
ActionListener.<BulkResponse>wrap(bulkResponse -> {
if (bulkResponse.hasFailures()) {
throwExportException(bulkResponse.getItems(), listener);
} else {
listener.onResponse(null);
}
}, e -> listener.onFailure(new ExportException("failed to flush export bulk [{}]", e, name))));
}, e -> listener.onFailure(new ExportException("failed to flush export bulk [{}]", e, name))),
client::bulk);
} finally {
requestBuilder = null;
}
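
Editor's note: call sites built on an ActionRequestBuilder, like the bulk flush above, get unwrapped via request() because the helper's generic signature takes a plain request plus a method reference. A sketch of the resulting shape, using the same names as the diff:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;

import static org.elasticsearch.xpack.ClientHelper.MONITORING_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;

class BuilderUnwrapSketch {
    void flush(Client client, BulkRequestBuilder requestBuilder, ActionListener<BulkResponse> listener) {
        // requestBuilder.execute(listener) would dispatch under the caller's context;
        // unwrapping to a BulkRequest lets the helper stash, tag the origin, and dispatch itself
        executeAsyncWithOrigin(client.threadPool().getThreadContext(), MONITORING_ORIGIN,
                requestBuilder.request(), listener, client::bulk);
    }
}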

View File

@ -17,6 +17,7 @@ import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRespo
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
@ -31,6 +32,7 @@ import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.IndexNotFoundException;
@ -44,12 +46,12 @@ import org.elasticsearch.xpack.monitoring.cleaner.CleanerService;
import org.elasticsearch.xpack.monitoring.exporter.ClusterAlertsUtil;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.client.WatcherClient;
import org.elasticsearch.xpack.watcher.transport.actions.delete.DeleteWatchRequest;
import org.elasticsearch.xpack.watcher.transport.actions.get.GetWatchRequest;
import org.elasticsearch.xpack.watcher.transport.actions.get.GetWatchResponse;
import org.elasticsearch.xpack.watcher.transport.actions.put.PutWatchRequest;
import org.elasticsearch.xpack.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.watch.Watch;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@ -69,6 +71,9 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString;
import static org.elasticsearch.xpack.ClientHelper.MONITORING_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.monitoring.Monitoring.CLEAN_WATCHER_HISTORY;
import static org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils.LAST_UPDATED_VERSION;
import static org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils.PIPELINE_IDS;
@ -82,7 +87,7 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
public static final String TYPE = "local";
private final InternalClient client;
private final Client client;
private final ClusterService clusterService;
private final XPackLicenseState licenseState;
private final CleanerService cleanerService;
@ -94,7 +99,7 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
private final AtomicBoolean waitedForSetup = new AtomicBoolean(false);
private final AtomicBoolean watcherSetup = new AtomicBoolean(false);
public LocalExporter(Exporter.Config config, InternalClient client, CleanerService cleanerService) {
public LocalExporter(Exporter.Config config, Client client, CleanerService cleanerService) {
super(config);
this.client = client;
this.clusterService = config.clusterService();
@ -306,14 +311,16 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
if (watches != null && watches.allPrimaryShardsActive() == false) {
logger.trace("cannot manage cluster alerts because [.watches] index is not allocated");
} else if ((watches == null || indexExists) && watcherSetup.compareAndSet(false, true)) {
installClusterAlerts(indexExists, asyncActions, pendingResponses);
getClusterAlertsInstallationAsyncActions(indexExists, asyncActions, pendingResponses);
}
}
if (asyncActions.size() > 0) {
if (installingSomething.compareAndSet(false, true)) {
pendingResponses.set(asyncActions.size());
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), MONITORING_ORIGIN)) {
asyncActions.forEach(Runnable::run);
}
} else {
// let the cluster catch up since requested installations may be ongoing
return false;
@ -383,7 +390,8 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
logger.debug("installing ingest pipeline [{}]", pipelineName);
client.admin().cluster().putPipeline(request, listener);
executeAsyncWithOrigin(client.threadPool().getThreadContext(), MONITORING_ORIGIN, request, listener,
client.admin().cluster()::putPipeline);
}
private boolean hasTemplate(final ClusterState clusterState, final String templateName) {
@ -392,14 +400,15 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
return template != null && hasValidVersion(template.getVersion(), LAST_UPDATED_VERSION);
}
// FIXME this should use the IndexTemplateMetaDataUpgrader
private void putTemplate(String template, String source, ActionListener<PutIndexTemplateResponse> listener) {
logger.debug("installing template [{}]", template);
PutIndexTemplateRequest request = new PutIndexTemplateRequest(template).source(source, XContentType.JSON);
assert !Thread.currentThread().isInterrupted() : "current thread has been interrupted before putting index template!!!";
// async call, so we won't block cluster event thread
client.admin().indices().putTemplate(request, listener);
executeAsyncWithOrigin(client.threadPool().getThreadContext(), MONITORING_ORIGIN, request, listener,
client.admin().indices()::putTemplate);
}
/**
@ -419,7 +428,8 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
* @param asyncActions Asynchronous actions are added to for each Watch.
* @param pendingResponses Pending response countdown we use to track completion.
*/
private void installClusterAlerts(final boolean indexExists, final List<Runnable> asyncActions, final AtomicInteger pendingResponses) {
private void getClusterAlertsInstallationAsyncActions(final boolean indexExists, final List<Runnable> asyncActions,
final AtomicInteger pendingResponses) {
final XPackClient xpackClient = new XPackClient(client);
final WatcherClient watcher = xpackClient.watcher();
final boolean canAddWatches = licenseState.isMonitoringClusterAlertsAllowed();
@ -453,8 +463,10 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
logger.trace("adding monitoring watch [{}]", uniqueWatchId);
watcher.putWatch(new PutWatchRequest(uniqueWatchId, new BytesArray(watch), XContentType.JSON),
new ResponseActionListener<>("watch", uniqueWatchId, pendingResponses, watcherSetup));
executeAsyncWithOrigin(client.threadPool().getThreadContext(), MONITORING_ORIGIN,
new PutWatchRequest(uniqueWatchId, new BytesArray(watch), XContentType.JSON),
new ResponseActionListener<PutWatchResponse>("watch", uniqueWatchId, pendingResponses, watcherSetup),
watcher::putWatch);
}
/**
@ -531,7 +543,8 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
private void deleteIndices(Set<String> indices) {
logger.trace("deleting {} indices: [{}]", indices.size(), collectionToCommaDelimitedString(indices));
client.admin().indices().delete(new DeleteIndexRequest(indices.toArray(new String[indices.size()])),
final DeleteIndexRequest request = new DeleteIndexRequest(indices.toArray(new String[indices.size()]));
executeAsyncWithOrigin(client.threadPool().getThreadContext(), MONITORING_ORIGIN, request,
new ActionListener<DeleteIndexResponse>() {
@Override
public void onResponse(DeleteIndexResponse response) {
@ -548,7 +561,7 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
public void onFailure(Exception e) {
logger.error("failed to delete indices", e);
}
});
}, client.admin().indices()::delete);
}
enum State {

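Editor's note: in the setup path above, a single stashWithOrigin block wraps the whole batch of queued Runnables. That works because each asynchronous call captures the thread context at dispatch time, so every action submitted inside the block carries the monitoring origin. A minimal sketch of the pattern:

import java.util.List;

import org.elasticsearch.common.util.concurrent.ThreadContext;

import static org.elasticsearch.xpack.ClientHelper.MONITORING_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;

class BatchedOriginSketch {
    void runSetup(ThreadContext threadContext, List<Runnable> asyncActions) {
        // one stash covers the whole batch: each request dispatched inside the block
        // snapshots the origin-tagged context, and responses restore their own captured
        // contexts independently of this try block
        try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, MONITORING_ORIGIN)) {
            asyncActions.forEach(Runnable::run);
        }
    }
}
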
View File

@ -8,6 +8,7 @@ package org.elasticsearch.xpack.persistent;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -21,20 +22,22 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.xpack.security.InternalClient;
import java.util.function.Predicate;
import static org.elasticsearch.xpack.ClientHelper.PERSISTENT_TASK_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* This service is used by persistent actions to propagate changes in the action state and notify about completion
*/
public class PersistentTasksService extends AbstractComponent {
private final InternalClient client;
private final Client client;
private final ClusterService clusterService;
private final ThreadPool threadPool;
public PersistentTasksService(Settings settings, ClusterService clusterService, ThreadPool threadPool, InternalClient client) {
public PersistentTasksService(Settings settings, ClusterService clusterService, ThreadPool threadPool, Client client) {
super(settings);
this.client = client;
this.clusterService = clusterService;
@ -50,8 +53,8 @@ public class PersistentTasksService extends AbstractComponent {
StartPersistentTaskAction.Request createPersistentActionRequest =
new StartPersistentTaskAction.Request(taskId, taskName, params);
try {
client.execute(StartPersistentTaskAction.INSTANCE, createPersistentActionRequest, ActionListener.wrap(
o -> listener.onResponse((PersistentTask<Params>) o.getTask()), listener::onFailure));
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, StartPersistentTaskAction.INSTANCE, createPersistentActionRequest,
ActionListener.wrap(o -> listener.onResponse((PersistentTask<Params>) o.getTask()), listener::onFailure));
} catch (Exception e) {
listener.onFailure(e);
}
@ -64,7 +67,7 @@ public class PersistentTasksService extends AbstractComponent {
ActionListener<PersistentTask<?>> listener) {
CompletionPersistentTaskAction.Request restartRequest = new CompletionPersistentTaskAction.Request(taskId, allocationId, failure);
try {
client.execute(CompletionPersistentTaskAction.INSTANCE, restartRequest,
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, CompletionPersistentTaskAction.INSTANCE, restartRequest,
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
} catch (Exception e) {
listener.onFailure(e);
@ -80,7 +83,8 @@ public class PersistentTasksService extends AbstractComponent {
cancelTasksRequest.setTaskId(new TaskId(localNode.getId(), taskId));
cancelTasksRequest.setReason("persistent action was removed");
try {
client.admin().cluster().cancelTasks(cancelTasksRequest, listener);
executeAsyncWithOrigin(client.threadPool().getThreadContext(), PERSISTENT_TASK_ORIGIN, cancelTasksRequest, listener,
client.admin().cluster()::cancelTasks);
} catch (Exception e) {
listener.onFailure(e);
}
@ -96,8 +100,8 @@ public class PersistentTasksService extends AbstractComponent {
UpdatePersistentTaskStatusAction.Request updateStatusRequest =
new UpdatePersistentTaskStatusAction.Request(taskId, allocationId, status);
try {
client.execute(UpdatePersistentTaskStatusAction.INSTANCE, updateStatusRequest, ActionListener.wrap(
o -> listener.onResponse(o.getTask()), listener::onFailure));
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, UpdatePersistentTaskStatusAction.INSTANCE, updateStatusRequest,
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
} catch (Exception e) {
listener.onFailure(e);
}
@ -109,8 +113,8 @@ public class PersistentTasksService extends AbstractComponent {
public void cancelPersistentTask(String taskId, ActionListener<PersistentTask<?>> listener) {
RemovePersistentTaskAction.Request removeRequest = new RemovePersistentTaskAction.Request(taskId);
try {
client.execute(RemovePersistentTaskAction.INSTANCE, removeRequest, ActionListener.wrap(o -> listener.onResponse(o.getTask()),
listener::onFailure));
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, RemovePersistentTaskAction.INSTANCE, removeRequest,
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
} catch (Exception e) {
listener.onFailure(e);
}

View File

@ -1,23 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.security;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.security.user.XPackSecurityUser;
/**
* A special filter client for internal usage by security to modify the security index.
*
* The {@link XPackSecurityUser} user is added to the execution context before each action is executed.
*/
public class InternalSecurityClient extends InternalClient {
public InternalSecurityClient(Settings settings, ThreadPool threadPool, Client in) {
super(settings, threadPool, in, XPackSecurityUser.INSTANCE);
}
}

View File

@ -5,95 +5,27 @@
*/
package org.elasticsearch.xpack.security;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.node.Node;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.authc.Authentication;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.security.user.XPackUser;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
/**
* A special filter client for internal node communication which adds the internal xpack user to the headers.
* An optionally secured client for internal node communication.
*
* When secured, the XPack user is added to the execution context before each action is executed.
*/
public class InternalClient extends FilterClient {
public final class ScrollHelper {
private final String nodeName;
private final boolean securityEnabled;
private final User user;
/**
* Constructs an InternalClient.
* If security is enabled the client is secure. Otherwise this client is a passthrough.
*/
public InternalClient(Settings settings, ThreadPool threadPool, Client in) {
this(settings, threadPool, in, XPackUser.INSTANCE);
}
InternalClient(Settings settings, ThreadPool threadPool, Client in, User user) {
super(settings, threadPool, in);
this.nodeName = Node.NODE_NAME_SETTING.get(settings);
this.securityEnabled = XPackSettings.SECURITY_ENABLED.get(settings);
this.user = user;
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends
ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
if (securityEnabled) {
final ThreadContext threadContext = threadPool().getThreadContext();
final Supplier<ThreadContext.StoredContext> storedContext = threadContext.newRestorableContext(true);
// we need to preserve the context here otherwise we would execute the response with the XPack user, which can cause
// problems since we expect the callback to run with the authenticated user that called the doExecute method
try (ThreadContext.StoredContext ctx = threadContext.stashContext()) {
processContext(threadContext);
super.doExecute(action, request, new ContextPreservingActionListener<>(storedContext, listener));
}
} else {
super.doExecute(action, request, listener);
}
}
protected void processContext(ThreadContext threadContext) {
try {
Authentication authentication = new Authentication(user,
new Authentication.RealmRef("__attach", "__attach", nodeName), null);
authentication.writeToContext(threadContext);
} catch (IOException ioe) {
throw new ElasticsearchException("failed to attach internal user to request", ioe);
}
}
private ScrollHelper() {}
/**
* This method fetches all results for the given search request, parses them using the given hit parser and calls the
@ -114,7 +46,8 @@ public class InternalClient extends FilterClient {
};
// This function is MADNESS! But it works, don't think about it too hard...
// simon edit: just watch this if you got this far https://www.youtube.com/watch?v=W-lF106Dgk8
client.search(request, new ActionListener<SearchResponse>() {
client.search(request, new ContextPreservingActionListener<>(client.threadPool().getThreadContext().newRestorableContext(true),
new ActionListener<SearchResponse>() {
private volatile SearchResponse lastResponse = null;
@Override
@ -163,6 +96,6 @@ public class InternalClient extends FilterClient {
}
}
}
});
}));
}
}
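
Editor's note: with InternalClient gone, ScrollHelper keeps the old guarantee that scroll callbacks run in the caller's context by wrapping its listener in a ContextPreservingActionListener, as the diff above shows. A condensed illustration of the mechanism:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.client.Client;

class ContextPreservationSketch {
    void search(Client client, SearchRequest request, ActionListener<SearchResponse> listener) {
        // newRestorableContext(true) captures the caller's context now; when the response
        // (or failure) arrives, the wrapper restores that context before invoking the
        // delegate, so callers never observe a stashed internal/origin context
        client.search(request, new ContextPreservingActionListener<>(
                client.threadPool().getThreadContext().newRestorableContext(true), listener));
    }
}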

View File

@ -308,13 +308,12 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin, Clus
return modules;
}
public Collection<Object> createComponents(Client nodeClient, ThreadPool threadPool, ClusterService clusterService,
public Collection<Object> createComponents(Client client, ThreadPool threadPool, ClusterService clusterService,
ResourceWatcherService resourceWatcherService,
List<XPackExtension> extensions) throws Exception {
if (enabled == false) {
return Collections.emptyList();
}
final InternalSecurityClient client = new InternalSecurityClient(settings, threadPool, nodeClient);
threadContext.set(threadPool.getThreadContext());
List<Object> components = new ArrayList<>();
securityContext.set(new SecurityContext(settings, threadPool.getThreadContext()));

View File

@ -8,6 +8,7 @@ package org.elasticsearch.xpack.security;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
@ -57,7 +58,7 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
private final IndexLifecycleManager securityIndex;
public SecurityLifecycleService(Settings settings, ClusterService clusterService,
ThreadPool threadPool, InternalSecurityClient client,
ThreadPool threadPool, Client client,
@Nullable IndexAuditTrail indexAuditTrail) {
super(settings);
this.settings = settings;

View File

@ -71,7 +71,9 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil
}
@Override
public void apply(Task task, String action, ActionRequest request, ActionListener listener, ActionFilterChain chain) {
public <Request extends ActionRequest, Response extends ActionResponse> void apply(Task task, String action, Request request,
ActionListener<Response> listener,
ActionFilterChain<Request, Response> chain) {
/*
A functional requirement - when the license of security is disabled (invalid/expired), security will continue
@ -85,11 +87,11 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil
}
if (licenseState.isAuthAllowed()) {
final boolean useSystemUser = AuthorizationUtils.shouldReplaceUserWithSystem(threadContext, action);
final ActionListener<ActionResponse> contextPreservingListener =
final ActionListener<Response> contextPreservingListener =
ContextPreservingActionListener.wrapPreservingContext(listener, threadContext);
ActionListener<Void> authenticatedListener = ActionListener.wrap(
(aVoid) -> chain.proceed(task, action, request, contextPreservingListener), contextPreservingListener::onFailure);
final boolean useSystemUser = AuthorizationUtils.shouldReplaceUserWithSystem(threadContext, action);
try {
if (useSystemUser) {
securityContext.executeAsUser(SystemUser.INSTANCE, (original) -> {
@ -99,6 +101,14 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil
listener.onFailure(e);
}
}, Version.CURRENT);
} else if (AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadContext)) {
AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadContext, securityContext, (original) -> {
try {
applyInternal(action, request, authenticatedListener);
} catch (IOException e) {
listener.onFailure(e);
}
});
} else {
try (ThreadContext.StoredContext ignore = threadContext.newStoredContext(true)) {
applyInternal(action, request, authenticatedListener);
@ -119,7 +129,8 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil
return Integer.MIN_VALUE;
}
private void applyInternal(String action, final ActionRequest request, ActionListener<Void> listener) throws IOException {
private <Request extends ActionRequest> void applyInternal(String action, Request request,
ActionListener<Void> listener) throws IOException {
if (CloseIndexAction.NAME.equals(action) || OpenIndexAction.NAME.equals(action) || DeleteIndexAction.NAME.equals(action)) {
IndicesRequest indicesRequest = (IndicesRequest) request;
try {
@ -145,7 +156,8 @@ public class SecurityActionFilter extends AbstractComponent implements ActionFil
ActionListener.wrap((authc) -> authorizeRequest(authc, securityAction, request, listener), listener::onFailure));
}
void authorizeRequest(Authentication authentication, String securityAction, ActionRequest request, ActionListener listener) {
private <Request extends ActionRequest> void authorizeRequest(Authentication authentication, String securityAction, Request request,
ActionListener<Void> listener) {
if (authentication == null) {
listener.onFailure(new IllegalArgumentException("authentication must be non null for authorization"));
} else {

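Editor's note: shouldSetUserBasedOnActionOrigin and switchUserBasedOnActionOriginAndExecute are not shown in this excerpt. Purely as an assumption about their shape, the check presumably keys off the action.origin transient that ClientHelper sets:

import org.elasticsearch.common.util.concurrent.ThreadContext;

class OriginCheckSketch {
    static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin";

    // hypothetical: a request qualifies for internal-user switching when it carries an
    // origin transient (the real check likely also requires that no authentication is
    // already attached to the context)
    static boolean shouldSetUserBasedOnActionOrigin(ThreadContext threadContext) {
        final String origin = threadContext.getTransient(ACTION_ORIGIN_TRANSIENT_NAME);
        return origin != null;
    }
}
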
View File

@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.audit.index;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@ -46,7 +45,6 @@ import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportMessage;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.security.audit.AuditLevel;
import org.elasticsearch.xpack.security.audit.AuditTrail;
import org.elasticsearch.xpack.security.authc.AuthenticationToken;
@ -82,6 +80,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.clientWithOrigin;
import static org.elasticsearch.xpack.security.Security.setting;
import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_DENIED;
import static org.elasticsearch.xpack.security.audit.AuditLevel.ACCESS_GRANTED;
@ -175,7 +175,7 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail {
return NAME;
}
public IndexAuditTrail(Settings settings, InternalSecurityClient client, ThreadPool threadPool, ClusterService clusterService) {
public IndexAuditTrail(Settings settings, Client client, ThreadPool threadPool, ClusterService clusterService) {
super(settings);
this.threadPool = threadPool;
this.clusterService = clusterService;
@ -189,7 +189,7 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail {
if (indexToRemoteCluster == false) {
// in the absence of client settings for remote indexing, fall back to the client that was passed in.
this.client = client;
this.client = clientWithOrigin(client, SECURITY_ORIGIN);
} else {
this.client = initializeRemoteClient(settings, logger);
}
@ -932,9 +932,7 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail {
@Override
public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to bulk index audit events: [{}]", failure.getMessage()), failure);
logger.error(new ParameterizedMessage("failed to bulk index audit events: [{}]", failure.getMessage()), failure);
}
}).setBulkActions(bulkSize)
.setFlushInterval(interval)

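Editor's note: clientWithOrigin is used here because IndexAuditTrail issues many calls through the stored client rather than at a single call site. Its body is not in this excerpt; a plausible reconstruction is a FilterClient that tags every outgoing request, mirroring the doExecute override of the InternalClient removed earlier in this commit:

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.common.util.concurrent.ThreadContext;

// a guess at what clientWithOrigin(client, origin) returns: every action goes out under a
// stashed context tagged with the fixed origin, and listeners get the caller's context back
class OriginSettingClientSketch extends FilterClient {
    private final String origin;

    OriginSettingClientSketch(Client in, String origin) {
        super(in);
        this.origin = origin;
    }

    @Override
    protected <Request extends ActionRequest, Response extends ActionResponse,
            RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
            Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
        final ThreadContext threadContext = threadPool().getThreadContext();
        final ActionListener<Response> wrapped =
                new ContextPreservingActionListener<>(threadContext.newRestorableContext(true), listener);
        try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
            threadContext.putTransient("action.origin", origin);
            super.doExecute(action, request, wrapped);
        }
    }
}
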
View File

@ -7,37 +7,38 @@ package org.elasticsearch.xpack.security.authc;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.index.reindex.DeleteByQueryAction;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.DeleteByQueryAction;
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.threadpool.ThreadPool.Names;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import java.time.Instant;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException;
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* Responsible for cleaning the invalidated tokens from the invalidated tokens index.
*/
final class ExpiredTokenRemover extends AbstractRunnable {
private final InternalSecurityClient client;
private final Client client;
private final AtomicBoolean inProgress = new AtomicBoolean(false);
private final Logger logger;
private final TimeValue timeout;
ExpiredTokenRemover(Settings settings, InternalSecurityClient internalClient) {
this.client = internalClient;
ExpiredTokenRemover(Settings settings, Client client) {
this.client = client;
this.logger = Loggers.getLogger(getClass(), settings);
this.timeout = TokenService.DELETE_TIMEOUT.get(settings);
}
@ -54,7 +55,8 @@ final class ExpiredTokenRemover extends AbstractRunnable {
.query(QueryBuilders.boolQuery()
.filter(QueryBuilders.termQuery("doc_type", TokenService.DOC_TYPE))
.filter(QueryBuilders.rangeQuery("expiration_time").lte(Instant.now().toEpochMilli())));
client.execute(DeleteByQueryAction.INSTANCE, dbq, ActionListener.wrap(r -> markComplete(),
executeAsyncWithOrigin(client, SECURITY_ORIGIN, DeleteByQueryAction.INSTANCE, dbq,
ActionListener.wrap(r -> markComplete(),
e -> {
if (isShardNotAvailableException(e) == false) {
logger.error("failed to delete expired tokens", e);

View File

@ -6,7 +6,6 @@
package org.elasticsearch.xpack.security.authc;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
@ -22,6 +21,7 @@ import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.AckedRequest;
@ -36,7 +36,6 @@ import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
@ -49,8 +48,6 @@ import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import javax.crypto.Cipher;
@ -89,6 +86,8 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicLong;
import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* Service responsible for the creation, validation, and other management of {@link UserToken}
@ -133,7 +132,7 @@ public final class TokenService extends AbstractComponent {
private final Clock clock;
private final TimeValue expirationDelay;
private final TimeValue deleteInterval;
private final InternalSecurityClient internalClient;
private final Client client;
private final SecurityLifecycleService lifecycleService;
private final ExpiredTokenRemover expiredTokenRemover;
private final boolean enabled;
@ -147,9 +146,9 @@ public final class TokenService extends AbstractComponent {
* Creates a new token service
* @param settings the node settings
* @param clock the clock that will be used for comparing timestamps
* @param internalClient the client to use when checking for revocations
* @param client the client to use when checking for revocations
*/
public TokenService(Settings settings, Clock clock, InternalSecurityClient internalClient,
public TokenService(Settings settings, Clock clock, Client client,
SecurityLifecycleService lifecycleService, ClusterService clusterService) throws GeneralSecurityException {
super(settings);
byte[] saltArr = new byte[SALT_BYTES];
@ -158,12 +157,12 @@ public final class TokenService extends AbstractComponent {
final SecureString tokenPassphrase = generateTokenKey();
this.clock = clock.withZone(ZoneOffset.UTC);
this.expirationDelay = TOKEN_EXPIRATION.get(settings);
this.internalClient = internalClient;
this.client = client;
this.lifecycleService = lifecycleService;
this.lastExpirationRunMs = internalClient.threadPool().relativeTimeInMillis();
this.lastExpirationRunMs = client.threadPool().relativeTimeInMillis();
this.deleteInterval = DELETE_INTERVAL.get(settings);
this.enabled = XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.get(settings);
this.expiredTokenRemover = new ExpiredTokenRemover(settings, internalClient);
this.expiredTokenRemover = new ExpiredTokenRemover(settings, client);
this.currentVersionBytes = ByteBuffer.allocate(4).putInt(TOKEN_SERVICE_VERSION.id).array();
ensureEncryptionCiphersSupported();
KeyAndCache keyAndCache = new KeyAndCache(new KeyAndTimestamp(tokenPassphrase.clone(), createdTimeStamps.incrementAndGet()),
@ -249,7 +248,7 @@ public final class TokenService extends AbstractComponent {
* request(s) that require a key computation will be delayed and there will be
* some additional latency.
*/
internalClient.threadPool().executor(THREAD_POOL_NAME)
client.threadPool().executor(THREAD_POOL_NAME)
.submit(new KeyComputingRunnable(in, iv, version, decodedSalt, listener, keyAndCache));
}
} else {
@ -293,11 +292,12 @@ public final class TokenService extends AbstractComponent {
} else {
final String id = getDocumentId(userToken);
lifecycleService.createIndexIfNeededThenExecute(listener, () -> {
internalClient.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, id)
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, id)
.setOpType(OpType.CREATE)
.setSource("doc_type", DOC_TYPE, "expiration_time", getExpirationTime().toEpochMilli())
.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL)
.execute(new ActionListener<IndexResponse>() {
.setRefreshPolicy(RefreshPolicy.WAIT_UNTIL).request(),
new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
listener.onResponse(indexResponse.getResult() == Result.CREATED);
@ -312,7 +312,7 @@ public final class TokenService extends AbstractComponent {
listener.onFailure(e);
}
}
});
}, client::index);
});
}
}, listener::onFailure));
@ -345,8 +345,9 @@ public final class TokenService extends AbstractComponent {
"the upgrade API is run on the security index"));
return;
}
internalClient.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, getDocumentId(userToken))
.execute(new ActionListener<GetResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, TYPE, getDocumentId(userToken)).request(),
new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse response) {
@ -370,7 +371,7 @@ public final class TokenService extends AbstractComponent {
listener.onFailure(e);
}
}
});
}, client::get);
} else if (lifecycleService.isSecurityIndexExisting()) {
// index exists but the index isn't available, do not trust the token
logger.warn("could not validate token as the security index is not available");
@ -391,9 +392,9 @@ public final class TokenService extends AbstractComponent {
private void maybeStartTokenRemover() {
if (lifecycleService.isSecurityIndexAvailable()) {
if (internalClient.threadPool().relativeTimeInMillis() - lastExpirationRunMs > deleteInterval.getMillis()) {
expiredTokenRemover.submit(internalClient.threadPool());
lastExpirationRunMs = internalClient.threadPool().relativeTimeInMillis();
if (client.threadPool().relativeTimeInMillis() - lastExpirationRunMs > deleteInterval.getMillis()) {
expiredTokenRemover.submit(client.threadPool());
lastExpirationRunMs = client.threadPool().relativeTimeInMillis();
}
}
}

View File

@ -6,20 +6,20 @@
package org.elasticsearch.xpack.security.authc.esnative;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
@ -28,6 +28,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.engine.DocumentMissingException;
@ -35,8 +36,7 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.security.ScrollHelper;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.action.realm.ClearRealmCacheRequest;
import org.elasticsearch.xpack.security.action.realm.ClearRealmCacheResponse;
@ -58,6 +58,11 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Supplier;
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
/**
* NativeUsersStore is a store for users that reads from an Elasticsearch index. This store is responsible for fetching the full
@ -74,12 +79,12 @@ public class NativeUsersStore extends AbstractComponent {
private final Hasher hasher = Hasher.BCRYPT;
private final InternalSecurityClient client;
private final Client client;
private final boolean isTribeNode;
private volatile SecurityLifecycleService securityLifecycleService;
public NativeUsersStore(Settings settings, InternalSecurityClient client, SecurityLifecycleService securityLifecycleService) {
public NativeUsersStore(Settings settings, Client client, SecurityLifecycleService securityLifecycleService) {
super(settings);
this.client = client;
this.isTribeNode = XPackPlugin.isTribeNode(settings);
@ -129,6 +134,8 @@ public class NativeUsersStore extends AbstractComponent {
.map(s -> getIdForUser(USER_DOC_TYPE, s)).toArray(String[]::new);
query = QueryBuilders.boolQuery().filter(QueryBuilders.idsQuery(INDEX_TYPE).addIds(users));
}
final Supplier<ThreadContext.StoredContext> supplier = client.threadPool().getThreadContext().newRestorableContext(false);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) {
SearchRequest request = client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setScroll(TimeValue.timeValueSeconds(10L))
.setQuery(query)
@ -136,12 +143,13 @@ public class NativeUsersStore extends AbstractComponent {
.setFetchSource(true)
.request();
request.indicesOptions().ignoreUnavailable();
InternalClient.fetchAllByEntity(client, request, listener, (hit) -> {
ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier, listener), (hit) -> {
UserAndPassword u = transformUser(hit.getId(), hit.getSourceAsMap());
return u != null ? u.user() : null;
});
}
} catch (Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unable to retrieve users {}", Arrays.toString(userNames)), e);
logger.error(new ParameterizedMessage("unable to retrieve users {}", Arrays.toString(userNames)), e);
listener.onFailure(e);
}
}
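
Editor's note: the getUsers change above captures newRestorableContext(false) before stashing, and ordering matters: capture the caller's context first, then stash and tag, then hand the scroll a listener that restores the captured context. A sketch of just that ordering:

import java.util.function.Consumer;
import java.util.function.Supplier;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.common.util.concurrent.ThreadContext;

import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;

class CaptureThenStashSketch {
    <T> void runWithSecurityOrigin(ThreadContext threadContext, ActionListener<T> listener,
                                   Consumer<ActionListener<T>> asyncCall) {
        // capture BEFORE stashing; capturing inside the try block would make the callback
        // restore the empty origin-tagged context instead of the caller's
        final Supplier<ThreadContext.StoredContext> restore = threadContext.newRestorableContext(false);
        try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, SECURITY_ORIGIN)) {
            asyncCall.accept(new ContextPreservingActionListener<>(restore, listener));
        }
    }
}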
@ -158,9 +166,10 @@ public class NativeUsersStore extends AbstractComponent {
return;
}
try {
GetRequest request = client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME,
INDEX_TYPE, getIdForUser(USER_DOC_TYPE, user)).request();
client.get(request, new ActionListener<GetResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME,
INDEX_TYPE, getIdForUser(USER_DOC_TYPE, user)).request(),
new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse response) {
listener.onResponse(transformUser(response.getId(), response.getSource()));
@ -170,21 +179,22 @@ public class NativeUsersStore extends AbstractComponent {
public void onFailure(Exception t) {
if (t instanceof IndexNotFoundException) {
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"could not retrieve user [{}] because security index does not exist", user), t);
} else {
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to retrieve user [{}]", user), t);
logger.error(new ParameterizedMessage("failed to retrieve user [{}]", user), t);
}
// We don't invoke the onFailure listener here; instead
// we call onResponse with a null user
listener.onResponse(null);
}
});
}, client::get);
} catch (IndexNotFoundException infe) {
logger.trace("could not retrieve user [{}] because security index does not exist", user);
logger.trace((org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("could not retrieve user [{}] because security index does not exist", user));
listener.onResponse(null);
} catch (Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unable to retrieve user [{}]", user), e);
logger.error(new ParameterizedMessage("unable to retrieve user [{}]", user), e);
listener.onFailure(e);
}
}
@ -217,11 +227,12 @@ public class NativeUsersStore extends AbstractComponent {
docType = USER_DOC_TYPE;
}
securityLifecycleService.createIndexIfNeededThenExecute(listener, () ->
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(docType, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.PASSWORD.getPreferredName(), String.valueOf(request.passwordHash()))
.setRefreshPolicy(request.getRefreshPolicy())
.execute(new ActionListener<UpdateResponse>() {
.setRefreshPolicy(request.getRefreshPolicy()).request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
assert updateResponse.getResult() == DocWriteResponse.Result.UPDATED;
@ -234,7 +245,7 @@ public class NativeUsersStore extends AbstractComponent {
if (docType.equals(RESERVED_USER_TYPE)) {
createReservedUser(username, request.passwordHash(), request.getRefreshPolicy(), listener);
} else {
logger.debug((Supplier<?>) () ->
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () ->
new ParameterizedMessage("failed to change password for user [{}]", request.username()), e);
ValidationException validationException = new ValidationException();
validationException.addValidationError("user must exist in order to change password");
@ -244,7 +255,8 @@ public class NativeUsersStore extends AbstractComponent {
listener.onFailure(e);
}
}
}));
}, client::update);
});
}
/**
@ -258,12 +270,15 @@ public class NativeUsersStore extends AbstractComponent {
"the upgrade API is run on the security index"));
return;
}
securityLifecycleService.createIndexIfNeededThenExecute(listener, () ->
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(RESERVED_USER_TYPE, username))
.setSource(Fields.PASSWORD.getPreferredName(), String.valueOf(passwordHash), Fields.ENABLED.getPreferredName(), true,
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(RESERVED_USER_TYPE, username))
.setSource(Fields.PASSWORD.getPreferredName(), String.valueOf(passwordHash),
Fields.ENABLED.getPreferredName(), true,
Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE)
.setRefreshPolicy(refresh)
.execute(new ActionListener<IndexResponse>() {
.setRefreshPolicy(refresh).request(),
new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
clearRealmCache(username, listener, null);
@ -273,7 +288,8 @@ public class NativeUsersStore extends AbstractComponent {
public void onFailure(Exception e) {
listener.onFailure(e);
}
}));
}, client::index);
});
}
/**
@ -304,7 +320,7 @@ public class NativeUsersStore extends AbstractComponent {
indexUser(request, listener);
}
} catch (Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unable to put user [{}]", request.username()), e);
logger.error(new ParameterizedMessage("unable to put user [{}]", request.username()), e);
listener.onFailure(e);
}
}
@ -316,7 +332,8 @@ public class NativeUsersStore extends AbstractComponent {
assert putUserRequest.passwordHash() == null;
assert !securityLifecycleService.isSecurityIndexOutOfDate() : "security index should be up to date";
// We must have an existing document
securityLifecycleService.createIndexIfNeededThenExecute(listener, () ->
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(USER_DOC_TYPE, putUserRequest.username()))
.setDoc(Requests.INDEX_CONTENT_TYPE,
@ -328,7 +345,8 @@ public class NativeUsersStore extends AbstractComponent {
Fields.ENABLED.getPreferredName(), putUserRequest.enabled(),
Fields.TYPE.getPreferredName(), USER_DOC_TYPE)
.setRefreshPolicy(putUserRequest.getRefreshPolicy())
.execute(new ActionListener<UpdateResponse>() {
.request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
assert updateResponse.getResult() == DocWriteResponse.Result.UPDATED;
@ -341,21 +359,25 @@ public class NativeUsersStore extends AbstractComponent {
if (isIndexNotFoundOrDocumentMissing(e)) {
// if the index doesn't exist we can never update a user
// if the document doesn't exist, then this update is not valid
logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to update user document with username [{}]",
logger.debug((org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to update user document with username [{}]",
putUserRequest.username()), e);
ValidationException validationException = new ValidationException();
validationException.addValidationError("password must be specified unless you are updating an existing user");
validationException
.addValidationError("password must be specified unless you are updating an existing user");
failure = validationException;
}
listener.onFailure(failure);
}
}));
}, client::update);
});
}
private void indexUser(final PutUserRequest putUserRequest, final ActionListener<Boolean> listener) {
assert putUserRequest.passwordHash() != null;
assert !securityLifecycleService.isSecurityIndexOutOfDate() : "security index should be up to date";
securityLifecycleService.createIndexIfNeededThenExecute(listener, () ->
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(USER_DOC_TYPE, putUserRequest.username()))
.setSource(Fields.USERNAME.getPreferredName(), putUserRequest.username(),
@ -367,17 +389,20 @@ public class NativeUsersStore extends AbstractComponent {
Fields.ENABLED.getPreferredName(), putUserRequest.enabled(),
Fields.TYPE.getPreferredName(), USER_DOC_TYPE)
.setRefreshPolicy(putUserRequest.getRefreshPolicy())
.execute(new ActionListener<IndexResponse>() {
.request(),
new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse updateResponse) {
clearRealmCache(putUserRequest.username(), listener, updateResponse.getResult() == DocWriteResponse.Result.CREATED);
clearRealmCache(putUserRequest.username(), listener,
updateResponse.getResult() == DocWriteResponse.Result.CREATED);
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
}));
}, client::index);
});
}
/**
@ -411,11 +436,14 @@ public class NativeUsersStore extends AbstractComponent {
final ActionListener<Void> listener) {
assert !securityLifecycleService.isSecurityIndexOutOfDate() : "security index should be up to date";
try {
securityLifecycleService.createIndexIfNeededThenExecute(listener, () ->
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(USER_DOC_TYPE, username))
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(USER_DOC_TYPE, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled)
.setRefreshPolicy(refreshPolicy)
.execute(new ActionListener<UpdateResponse>() {
.request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
clearRealmCache(username, listener, null);
@ -427,15 +455,18 @@ public class NativeUsersStore extends AbstractComponent {
if (isIndexNotFoundOrDocumentMissing(e)) {
// if the index doesn't exist we can never update a user
// if the document doesn't exist, then this update is not valid
logger.debug((Supplier<?>) () ->
new ParameterizedMessage("failed to {} user [{}]", enabled ? "enable" : "disable", username), e);
logger.debug((org.apache.logging.log4j.util.Supplier<?>)
() -> new ParameterizedMessage("failed to {} user [{}]",
enabled ? "enable" : "disable", username), e);
ValidationException validationException = new ValidationException();
validationException.addValidationError("only existing users can be " + (enabled ? "enabled" : "disabled"));
validationException.addValidationError("only existing users can be " +
(enabled ? "enabled" : "disabled"));
failure = validationException;
}
listener.onFailure(failure);
}
}));
}, client::update);
});
} catch (Exception e) {
listener.onFailure(e);
}
@ -445,15 +476,18 @@ public class NativeUsersStore extends AbstractComponent {
boolean clearCache, final ActionListener<Void> listener) {
assert !securityLifecycleService.isSecurityIndexOutOfDate() : "security index should be up to date";
try {
securityLifecycleService.createIndexIfNeededThenExecute(listener, () ->
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(RESERVED_USER_TYPE, username))
securityLifecycleService.createIndexIfNeededThenExecute(listener, () -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareUpdate(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE,
getIdForUser(RESERVED_USER_TYPE, username))
.setDoc(Requests.INDEX_CONTENT_TYPE, Fields.ENABLED.getPreferredName(), enabled)
.setUpsert(XContentType.JSON,
Fields.PASSWORD.getPreferredName(), "",
Fields.ENABLED.getPreferredName(), enabled,
Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE)
.setRefreshPolicy(refreshPolicy)
.execute(new ActionListener<UpdateResponse>() {
.request(),
new ActionListener<UpdateResponse>() {
@Override
public void onResponse(UpdateResponse updateResponse) {
if (clearCache) {
@ -467,7 +501,8 @@ public class NativeUsersStore extends AbstractComponent {
public void onFailure(Exception e) {
listener.onFailure(e);
}
}));
}, client::update);
});
} catch (Exception e) {
listener.onFailure(e);
}
@ -493,7 +528,7 @@ public class NativeUsersStore extends AbstractComponent {
INDEX_TYPE, getIdForUser(USER_DOC_TYPE, deleteUserRequest.username())).request();
request.indicesOptions().ignoreUnavailable();
request.setRefreshPolicy(deleteUserRequest.getRefreshPolicy());
client.delete(request, new ActionListener<DeleteResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse deleteResponse) {
clearRealmCache(deleteUserRequest.username(), listener,
@ -504,7 +539,7 @@ public class NativeUsersStore extends AbstractComponent {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}, client::delete);
} catch (Exception e) {
logger.error("unable to remove user", e);
listener.onFailure(e);
@ -539,8 +574,10 @@ public class NativeUsersStore extends AbstractComponent {
"the upgrade API is run on the security index"));
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME, INDEX_TYPE, getIdForUser(RESERVED_USER_TYPE, username))
.execute(new ActionListener<GetResponse>() {
.request(),
new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse getResponse) {
if (getResponse.isExists()) {
@ -565,17 +602,15 @@ public class NativeUsersStore extends AbstractComponent {
@Override
public void onFailure(Exception e) {
if (e instanceof IndexNotFoundException) {
logger.trace((Supplier<?>) () -> new ParameterizedMessage(
logger.trace((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"could not retrieve built in user [{}] info since security index does not exist", username), e);
listener.onResponse(null);
} else {
logger.error(
(Supplier<?>) () -> new ParameterizedMessage(
"failed to retrieve built in user [{}] info", username), e);
logger.error(new ParameterizedMessage("failed to retrieve built in user [{}] info", username), e);
listener.onFailure(null);
}
}
});
}, client::get);
}
void getAllReservedUserInfo(ActionListener<Map<String, ReservedUserInfo>> listener) {
@ -585,15 +620,16 @@ public class NativeUsersStore extends AbstractComponent {
"the upgrade API is run on the security index"));
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), RESERVED_USER_TYPE))
.setFetchSource(true)
.execute(new ActionListener<SearchResponse>() {
.setFetchSource(true).request(),
new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
Map<String, ReservedUserInfo> userInfos = new HashMap<>();
assert searchResponse.getHits().getTotalHits() <= 10 : "there are more than 10 reserved users we need to change " +
"this to retrieve them all!";
assert searchResponse.getHits().getTotalHits() <= 10 :
"there are more than 10 reserved users we need to change this to retrieve them all!";
for (SearchHit searchHit : searchResponse.getHits().getHits()) {
Map<String, Object> sourceMap = searchHit.getSourceAsMap();
String password = (String) sourceMap.get(Fields.PASSWORD.getPreferredName());
@ -625,14 +661,15 @@ public class NativeUsersStore extends AbstractComponent {
listener.onFailure(e);
}
}
});
}, client::search);
}
private <Response> void clearRealmCache(String username, ActionListener<Response> listener, Response response) {
SecurityClient securityClient = new SecurityClient(client);
ClearRealmCacheRequest request = securityClient.prepareClearRealmCache()
.usernames(username).request();
securityClient.clearRealmCache(request, new ActionListener<ClearRealmCacheResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
new ActionListener<ClearRealmCacheResponse>() {
@Override
public void onResponse(ClearRealmCacheResponse nodes) {
listener.onResponse(response);
@ -640,12 +677,12 @@ public class NativeUsersStore extends AbstractComponent {
@Override
public void onFailure(Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unable to clear realm cache for user [{}]", username), e);
logger.error(new ParameterizedMessage("unable to clear realm cache for user [{}]", username), e);
ElasticsearchException exception = new ElasticsearchException("clearing the cache for [" + username
+ "] failed. please clear the realm cache manually", e);
listener.onFailure(exception);
}
});
}, securityClient::clearRealmCache);
}
@Nullable
@ -668,7 +705,7 @@ public class NativeUsersStore extends AbstractComponent {
Map<String, Object> metadata = (Map<String, Object>) sourceMap.get(Fields.METADATA.getPreferredName());
return new UserAndPassword(new User(username, roles, fullName, email, metadata, enabled), password.toCharArray());
} catch (Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("error in the format of data for user [{}]", username), e);
logger.error(new ParameterizedMessage("error in the format of data for user [{}]", username), e);
return null;
}
}
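
The NativeUsersStore changes above all funnel through ClientHelper.executeAsyncWithOrigin, whose body is not shown in this excerpt. A minimal sketch of what such a helper plausibly looks like, assuming it builds on stashWithOrigin and ContextPreservingActionListener the way the call sites suggest (the exact generic bounds are an assumption):

    public static <Request extends ActionRequest, Response extends ActionResponse> void executeAsyncWithOrigin(
            ThreadContext threadContext, String origin, Request request, ActionListener<Response> listener,
            BiConsumer<Request, ActionListener<Response>> consumer) {
        // capture a restorable view of the caller's context before stashing it
        final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
        try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) {
            // run the request with the origin set; the wrapped listener restores the
            // caller's context before onResponse/onFailure are invoked
            consumer.accept(request, new ContextPreservingActionListener<>(supplier, listener));
        }
    }

This is what the trailing method references in the hunks above (client::get, client::update, client::delete) are for: they are the BiConsumer that actually sends the request.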


@ -5,6 +5,36 @@
*/
package org.elasticsearch.xpack.security.authc.support.mapper;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.security.ScrollHelper;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.action.realm.ClearRealmCacheResponse;
import org.elasticsearch.xpack.security.action.rolemapping.DeleteRoleMappingRequest;
import org.elasticsearch.xpack.security.action.rolemapping.PutRoleMappingRequest;
import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
import org.elasticsearch.xpack.security.client.SecurityClient;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
@ -15,40 +45,16 @@ import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.action.rolemapping.DeleteRoleMappingRequest;
import org.elasticsearch.xpack.security.action.rolemapping.PutRoleMappingRequest;
import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm;
import org.elasticsearch.xpack.security.authc.support.UserRoleMapper;
import org.elasticsearch.xpack.security.client.SecurityClient;
import static org.elasticsearch.action.DocWriteResponse.Result.CREATED;
import static org.elasticsearch.action.DocWriteResponse.Result.DELETED;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
/**
@ -71,12 +77,12 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
private static final String SECURITY_GENERIC_TYPE = "doc";
private final InternalSecurityClient client;
private final Client client;
private final boolean isTribeNode;
private final SecurityLifecycleService securityLifecycleService;
private final List<String> realmsToRefresh = new CopyOnWriteArrayList<>();
public NativeRoleMappingStore(Settings settings, InternalSecurityClient client, SecurityLifecycleService securityLifecycleService) {
public NativeRoleMappingStore(Settings settings, Client client, SecurityLifecycleService securityLifecycleService) {
super(settings);
this.client = client;
this.isTribeNode = XPackPlugin.isTribeNode(settings);
@ -104,6 +110,8 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
return;
}
final QueryBuilder query = QueryBuilders.termQuery(DOC_TYPE_FIELD, DOC_TYPE_ROLE_MAPPING);
final Supplier<ThreadContext.StoredContext> supplier = client.threadPool().getThreadContext().newRestorableContext(false);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) {
SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME)
.setScroll(TimeValue.timeValueSeconds(10L))
.setTypes(SECURITY_GENERIC_TYPE)
@ -112,15 +120,17 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
.setFetchSource(true)
.request();
request.indicesOptions().ignoreUnavailable();
InternalClient.fetchAllByEntity(client, request, ActionListener.wrap((Collection<ExpressionRoleMapping> mappings) ->
ScrollHelper.fetchAllByEntity(client, request,
new ContextPreservingActionListener<>(supplier, ActionListener.wrap((Collection<ExpressionRoleMapping> mappings) ->
listener.onResponse(mappings.stream().filter(Objects::nonNull).collect(Collectors.toList())),
ex -> {
logger.error(new ParameterizedMessage("failed to load role mappings from index [{}] skipping all mappings.",
SECURITY_INDEX_NAME), ex);
listener.onResponse(Collections.emptyList());
}),
})),
doc -> buildMapping(getNameFromId(doc.getId()), doc.getSourceRef()));
}
}
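
The scroll-based loader above cannot use the single-shot helper, because ScrollHelper.fetchAllByEntity presumably pages through a series of scroll requests. The pattern it uses instead, condensed here for readability (parseHit is a hypothetical stand-in for the buildMapping reference above):

    final ThreadContext threadContext = client.threadPool().getThreadContext();
    final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
    try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, SECURITY_ORIGIN)) {
        // the initial search and the follow-up scroll requests all run with the security
        // origin; the wrapped listener restores the caller's context before delivering results
        ScrollHelper.fetchAllByEntity(client, request,
                new ContextPreservingActionListener<>(supplier, listener),
                hit -> parseHit(hit));
    }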
private ExpressionRoleMapping buildMapping(String id, BytesReference source) {
try (XContentParser parser = getParser(source)) {
@ -179,10 +189,12 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
listener.onFailure(e);
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SECURITY_INDEX_NAME, SECURITY_GENERIC_TYPE, getIdForName(mapping.getName()))
.setSource(xContentBuilder)
.setRefreshPolicy(request.getRefreshPolicy())
.execute(new ActionListener<IndexResponse>() {
.request(),
new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
boolean created = indexResponse.getResult() == CREATED;
@ -194,7 +206,7 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
logger.error(new ParameterizedMessage("failed to put role-mapping [{}]", mapping.getName()), e);
listener.onFailure(e);
}
});
}, client::index);
});
}
@ -205,9 +217,11 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
"the upgrade API is run on the security index"));
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareDelete(SECURITY_INDEX_NAME, SECURITY_GENERIC_TYPE, getIdForName(request.getName()))
.setRefreshPolicy(request.getRefreshPolicy())
.execute(new ActionListener<DeleteResponse>() {
.request(),
new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse deleteResponse) {
@ -221,7 +235,7 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
listener.onFailure(e);
}
});
}, client::delete);
}
/**
@ -293,17 +307,20 @@ public class NativeRoleMappingStore extends AbstractComponent implements UserRol
private <Result> void refreshRealms(ActionListener<Result> listener, Result result) {
String[] realmNames = this.realmsToRefresh.toArray(new String[realmsToRefresh.size()]);
new SecurityClient(this.client).prepareClearRealmCache().realms(realmNames).execute(ActionListener.wrap(
final SecurityClient securityClient = new SecurityClient(client);
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
securityClient.prepareClearRealmCache().realms(realmNames).request(),
ActionListener.<ClearRealmCacheResponse>wrap(
response -> {
logger.debug((Supplier<?>) () -> new ParameterizedMessage(
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"Cleared cached in realms [{}] due to role mapping change", Arrays.toString(realmNames)));
listener.onResponse(result);
},
ex -> {
logger.warn("Failed to clear cache for realms [{}]", Arrays.toString(realmNames));
listener.onFailure(ex);
})
);
}),
securityClient::clearRealmCache);
}
@Override


@ -5,17 +5,30 @@
*/
package org.elasticsearch.xpack.security.authz;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.xpack.ClientHelper;
import org.elasticsearch.xpack.security.SecurityContext;
import org.elasticsearch.xpack.security.authc.Authentication;
import org.elasticsearch.xpack.security.authz.permission.Role;
import org.elasticsearch.xpack.security.support.Automatons;
import org.elasticsearch.xpack.security.user.SystemUser;
import org.elasticsearch.xpack.security.user.XPackSecurityUser;
import org.elasticsearch.xpack.security.user.XPackUser;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Predicate;
import static org.elasticsearch.xpack.ClientHelper.DEPRECATION_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.MONITORING_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.PERSISTENT_TASK_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
public final class AuthorizationUtils {
private static final Predicate<String> INTERNAL_PREDICATE = Automatons.predicate("internal:*");
@ -38,26 +51,71 @@ public final class AuthorizationUtils {
* @return true if the system user should be used to execute a request
*/
public static boolean shouldReplaceUserWithSystem(ThreadContext threadContext, String action) {
// the action must be internal OR the thread context must be a system context.
if (threadContext.isSystemContext() == false && isInternalAction(action) == false) {
return false;
}
// there is no authentication object AND we are executing in a system context OR an internal action
// AND there is no action origin set in the thread context
Authentication authentication = threadContext.getTransient(Authentication.AUTHENTICATION_KEY);
if (authentication == null) {
if (authentication == null && threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME) == null) {
return true;
}
// we have a internal action being executed by a user that is not the system user, lets verify that there is a
// originating action that is not a internal action
// we have an internal action being executed by a user other than the system user; let's verify that there is an
// originating action that is not an internal action. We verify that there must be an originating action, as an
// internal action should never be called by user code from a client
final String originatingAction = threadContext.getTransient(AuthorizationService.ORIGINATING_ACTION_KEY);
if (originatingAction != null && isInternalAction(originatingAction) == false) {
return true;
}
// either there was no originating action or it was a internal action, we should not replace under these circumstances
// either there was no originating action or the originating action was an internal action,
// we should not replace under these circumstances
return false;
}
/**
* Returns true if the thread context contains the origin of the action and does not have any authentication
*/
public static boolean shouldSetUserBasedOnActionOrigin(ThreadContext context) {
final String actionOrigin = context.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME);
final Authentication authentication = context.getTransient(Authentication.AUTHENTICATION_KEY);
return actionOrigin != null && authentication == null;
}
/**
* Stashes the current context and executes the consumer as the proper user based on the origin of the action.
*
* This method knows nothing about listeners so it is important that callers ensure their listeners preserve their
* context and restore it appropriately.
*/
public static void switchUserBasedOnActionOriginAndExecute(ThreadContext threadContext, SecurityContext securityContext,
Consumer<ThreadContext.StoredContext> consumer) {
final String actionOrigin = threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME);
if (actionOrigin == null) {
assert false : "cannot switch user if there is no action origin";
throw new IllegalStateException("cannot switch user if there is no action origin");
}
switch (actionOrigin) {
case SECURITY_ORIGIN:
securityContext.executeAsUser(XPackSecurityUser.INSTANCE, consumer, Version.CURRENT);
break;
case WATCHER_ORIGIN:
case ML_ORIGIN:
case MONITORING_ORIGIN:
case DEPRECATION_ORIGIN:
case PERSISTENT_TASK_ORIGIN:
securityContext.executeAsUser(XPackUser.INSTANCE, consumer, Version.CURRENT);
break;
default:
assert false : "action.origin [" + actionOrigin + "] is unknown!";
throw new IllegalStateException("action.origin [" + actionOrigin + "] should always be a known value");
}
}
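
A hypothetical call site for the two methods above, mirroring how the transport interceptor (later in this diff) dispatches a request on behalf of an internal component; sendWithUser stands in for the real sender:

    ThreadContext threadContext = threadPool.getThreadContext();
    if (AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadContext)) {
        // switches to XPackSecurityUser or XPackUser depending on the stored origin
        AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadContext, securityContext,
                original -> sendWithUser(original));
    }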
private static boolean isInternalAction(String action) {
return INTERNAL_PREDICATE.test(action);
}


@ -7,19 +7,19 @@ package org.elasticsearch.xpack.security.authz.store;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.MultiSearchResponse.Item;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.component.AbstractComponent;
@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
@ -37,8 +38,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.license.LicenseUtils;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.security.ScrollHelper;
import org.elasticsearch.xpack.security.SecurityLifecycleService;
import org.elasticsearch.xpack.security.action.role.ClearRolesCacheRequest;
import org.elasticsearch.xpack.security.action.role.ClearRolesCacheResponse;
@ -57,9 +57,13 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Supplier;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.security.Security.setting;
import static org.elasticsearch.xpack.security.authz.RoleDescriptor.ROLE_TYPE;
@ -80,14 +84,14 @@ public class NativeRolesStore extends AbstractComponent {
TimeValue.timeValueMinutes(20), Property.NodeScope, Property.Deprecated);
private static final String ROLE_DOC_TYPE = "doc";
private final InternalSecurityClient client;
private final Client client;
private final XPackLicenseState licenseState;
private final boolean isTribeNode;
private SecurityClient securityClient;
private final SecurityLifecycleService securityLifecycleService;
public NativeRolesStore(Settings settings, InternalSecurityClient client, XPackLicenseState licenseState,
public NativeRolesStore(Settings settings, Client client, XPackLicenseState licenseState,
SecurityLifecycleService securityLifecycleService) {
super(settings);
this.client = client;
@ -118,6 +122,8 @@ public class NativeRolesStore extends AbstractComponent {
final String[] roleNames = Arrays.stream(names).map(s -> getIdForUser(s)).toArray(String[]::new);
query = QueryBuilders.boolQuery().filter(QueryBuilders.idsQuery(ROLE_DOC_TYPE).addIds(roleNames));
}
final Supplier<ThreadContext.StoredContext> supplier = client.threadPool().getThreadContext().newRestorableContext(false);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) {
SearchRequest request = client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setScroll(TimeValue.timeValueSeconds(10L))
.setQuery(query)
@ -125,8 +131,9 @@ public class NativeRolesStore extends AbstractComponent {
.setFetchSource(true)
.request();
request.indicesOptions().ignoreUnavailable();
InternalClient.fetchAllByEntity(client, request, listener,
ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier, listener),
(hit) -> transformRole(hit.getId(), hit.getSourceRef(), logger, licenseState));
}
} catch (Exception e) {
logger.error(new ParameterizedMessage("unable to retrieve roles {}", Arrays.toString(names)), e);
listener.onFailure(e);
@ -153,10 +160,12 @@ public class NativeRolesStore extends AbstractComponent {
DeleteRequest request = client.prepareDelete(SecurityLifecycleService.SECURITY_INDEX_NAME,
ROLE_DOC_TYPE, getIdForUser(deleteRoleRequest.name())).request();
request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy());
client.delete(request, new ActionListener<DeleteResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
new ActionListener<DeleteResponse>() {
@Override
public void onResponse(DeleteResponse deleteResponse) {
clearRoleCache(deleteRoleRequest.name(), listener, deleteResponse.getResult() == DocWriteResponse.Result.DELETED);
clearRoleCache(deleteRoleRequest.name(), listener,
deleteResponse.getResult() == DocWriteResponse.Result.DELETED);
}
@Override
@ -164,7 +173,7 @@ public class NativeRolesStore extends AbstractComponent {
logger.error("failed to delete role from the index", e);
listener.onFailure(e);
}
});
}, client::delete);
} catch (IndexNotFoundException e) {
logger.trace("security index does not exist", e);
listener.onResponse(false);
@ -206,10 +215,12 @@ public class NativeRolesStore extends AbstractComponent {
listener.onFailure(e);
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareIndex(SecurityLifecycleService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(role.getName()))
.setSource(xContentBuilder)
.setRefreshPolicy(request.getRefreshPolicy())
.execute(new ActionListener<IndexResponse>() {
.request(),
new ActionListener<IndexResponse>() {
@Override
public void onResponse(IndexResponse indexResponse) {
final boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED;
@ -218,13 +229,13 @@ public class NativeRolesStore extends AbstractComponent {
@Override
public void onFailure(Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to put role [{}]", request.name()), e);
logger.error(new ParameterizedMessage("failed to put role [{}]", request.name()), e);
listener.onFailure(e);
}
});
}, client::index);
});
} catch (Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unable to put role [{}]", request.name()), e);
logger.error(new ParameterizedMessage("unable to put role [{}]", request.name()), e);
listener.onFailure(e);
}
}
@ -243,6 +254,7 @@ public class NativeRolesStore extends AbstractComponent {
"the upgrade API is run on the security index"));
return;
}
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareMultiSearch()
.add(client.prepareSearch(SecurityLifecycleService.SECURITY_INDEX_NAME)
.setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE))
@ -263,7 +275,8 @@ public class NativeRolesStore extends AbstractComponent {
.filter(existsQuery("indices.query")))
.setSize(0)
.setTerminateAfter(1))
.execute(new ActionListener<MultiSearchResponse>() {
.request(),
new ActionListener<MultiSearchResponse>() {
@Override
public void onResponse(MultiSearchResponse items) {
Item[] responses = items.getResponses();
@ -291,7 +304,7 @@ public class NativeRolesStore extends AbstractComponent {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}, client::multiSearch);
}
}
@ -310,11 +323,11 @@ public class NativeRolesStore extends AbstractComponent {
public void onFailure(Exception e) {
// if the index or the shard is not there / available we just claim the role is not there
if (TransportActions.isShardNotAvailableException(e)) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to load role [{}] index not available",
roleId), e);
logger.warn((org.apache.logging.log4j.util.Supplier<?>) () ->
new ParameterizedMessage("failed to load role [{}] index not available", roleId), e);
roleActionListener.onResponse(null);
} else {
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to load role [{}]", roleId), e);
logger.error(new ParameterizedMessage("failed to load role [{}]", roleId), e);
roleActionListener.onFailure(e);
}
}
@ -329,13 +342,16 @@ public class NativeRolesStore extends AbstractComponent {
"the upgrade API is run on the security index"));
return;
}
try {
GetRequest request = client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME,
ROLE_DOC_TYPE, getIdForUser(role)).request();
client.get(request, listener);
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
client.prepareGet(SecurityLifecycleService.SECURITY_INDEX_NAME,
ROLE_DOC_TYPE, getIdForUser(role)).request(),
listener,
client::get);
} catch (IndexNotFoundException e) {
logger.trace(
(Supplier<?>) () -> new ParameterizedMessage(
(org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage(
"unable to retrieve role [{}] since security index does not exist", role), e);
listener.onResponse(new GetResponse(
new GetResult(SecurityLifecycleService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE,
@ -348,7 +364,8 @@ public class NativeRolesStore extends AbstractComponent {
private <Response> void clearRoleCache(final String role, ActionListener<Response> listener, Response response) {
ClearRolesCacheRequest request = new ClearRolesCacheRequest().names(role);
securityClient.clearRolesCache(request, new ActionListener<ClearRolesCacheResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
new ActionListener<ClearRolesCacheResponse>() {
@Override
public void onResponse(ClearRolesCacheResponse nodes) {
listener.onResponse(response);
@ -356,12 +373,12 @@ public class NativeRolesStore extends AbstractComponent {
@Override
public void onFailure(Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("unable to clear cache for role [{}]", role), e);
logger.error(new ParameterizedMessage("unable to clear cache for role [{}]", role), e);
ElasticsearchException exception = new ElasticsearchException("clearing the cache for [" + role
+ "] failed. please clear the role cache manually", e);
listener.onFailure(exception);
}
});
}, securityClient::clearRolesCache);
}
@Nullable
@ -407,7 +424,7 @@ public class NativeRolesStore extends AbstractComponent {
}
} catch (Exception e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("error in the format of data for role [{}]", name), e);
logger.error(new ParameterizedMessage("error in the format of data for role [{}]", name), e);
return null;
}
}


@ -5,16 +5,6 @@
*/
package org.elasticsearch.xpack.security.support;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.ElasticsearchException;
@ -26,6 +16,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterIndexHealth;
@ -37,11 +28,22 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.template.TemplateUtils;
import org.elasticsearch.xpack.upgrade.IndexUpgradeCheck;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiConsumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING;
import static org.elasticsearch.xpack.ClientHelper.SECURITY_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME;
/**
@ -57,14 +59,14 @@ public class IndexLifecycleManager extends AbstractComponent {
private final String indexName;
private final String templateName;
private final InternalSecurityClient client;
private final Client client;
private final List<BiConsumer<ClusterIndexHealth, ClusterIndexHealth>> indexHealthChangeListeners = new CopyOnWriteArrayList<>();
private final List<BiConsumer<Boolean, Boolean>> indexOutOfDateListeners = new CopyOnWriteArrayList<>();
private volatile State indexState = new State(false, false, false, false, null);
public IndexLifecycleManager(Settings settings, InternalSecurityClient client, String indexName, String templateName) {
public IndexLifecycleManager(Settings settings, Client client, String indexName, String templateName) {
super(settings);
this.client = client;
this.indexName = indexName;
@ -291,7 +293,8 @@ public class IndexLifecycleManager extends AbstractComponent {
} else {
CreateIndexRequest request = new CreateIndexRequest(INTERNAL_SECURITY_INDEX);
request.alias(new Alias(SECURITY_INDEX_NAME));
client.admin().indices().create(request, new ActionListener<CreateIndexResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
new ActionListener<CreateIndexResponse>() {
@Override
public void onResponse(CreateIndexResponse createIndexResponse) {
if (createIndexResponse.isAcknowledged()) {
@ -312,7 +315,7 @@ public class IndexLifecycleManager extends AbstractComponent {
listener.onFailure(e);
}
}
});
}, client.admin().indices()::create);
}
}
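
Worth noting in the hunk above: executeAsyncWithOrigin is not tied to Client methods. Since its final argument is just a BiConsumer, an admin-client method reference fits as well. A condensed restatement of the change, with an illustrative listener body:

    CreateIndexRequest request = new CreateIndexRequest(INTERNAL_SECURITY_INDEX)
            .alias(new Alias(SECURITY_INDEX_NAME));
    executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request,
            ActionListener.wrap(response -> logger.debug("security index created"), listener::onFailure),
            client.admin().indices()::create);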


@ -36,9 +36,7 @@ import org.elasticsearch.xpack.security.authc.AuthenticationService;
import org.elasticsearch.xpack.security.authz.AuthorizationService;
import org.elasticsearch.xpack.security.authz.AuthorizationUtils;
import org.elasticsearch.xpack.security.transport.netty4.SecurityNetty4Transport;
import org.elasticsearch.xpack.security.user.KibanaUser;
import org.elasticsearch.xpack.security.user.SystemUser;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.ssl.SSLService;
import java.util.Collections;
@ -111,14 +109,11 @@ public class SecurityServerTransportInterceptor extends AbstractComponent implem
securityContext.executeAsUser(SystemUser.INSTANCE, (original) -> sendWithUser(connection, action, request, options,
new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original)
, handler), sender), minVersion);
} else if (reservedRealmEnabled && connection.getVersion().before(Version.V_5_2_0) &&
KibanaUser.NAME.equals(securityContext.getUser().principal())) {
final User kibanaUser = securityContext.getUser();
final User bwcKibanaUser = new User(kibanaUser.principal(), new String[] { "kibana" }, kibanaUser.fullName(),
kibanaUser.email(), kibanaUser.metadata(), kibanaUser.enabled());
securityContext.executeAsUser(bwcKibanaUser, (original) -> sendWithUser(connection, action, request, options,
new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original),
handler), sender), connection.getVersion());
} else if (AuthorizationUtils.shouldSetUserBasedOnActionOrigin(threadPool.getThreadContext())) {
AuthorizationUtils.switchUserBasedOnActionOriginAndExecute(threadPool.getThreadContext(), securityContext,
(original) -> sendWithUser(connection, action, request, options,
new ContextRestoreResponseHandler<>(threadPool.getThreadContext().wrapRestorable(original)
, handler), sender));
} else if (securityContext.getAuthentication() != null &&
securityContext.getAuthentication().getVersion().equals(minVersion) == false) {
// re-write the authentication since we want the authentication version to match the version of the connection


@ -17,13 +17,15 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.compress.NotXContentException;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.function.Predicate;
import java.util.regex.Pattern;
@ -37,6 +39,20 @@ public class TemplateUtils {
private TemplateUtils() {}
/**
* Loads a JSON template as a resource and puts it into the provided map
*/
public static void loadTemplateIntoMap(String resource, Map<String, IndexTemplateMetaData> map, String templateName, String version,
String versionProperty, Logger logger) {
final String template = loadTemplate(resource, version, versionProperty);
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, template)) {
map.put(templateName, IndexTemplateMetaData.Builder.fromXContent(parser, templateName));
} catch (IOException e) {
// TODO: should we handle this with a thrown exception?
logger.error("Error loading template [{}] as part of metadata upgrading", templateName);
}
}
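
loadTemplateIntoMap is shaped for the core template upgrade framework, whose plugin hook hands upgraders a mutable map of templates. A hedged sketch of that wiring, assuming the override point is Plugin#getIndexTemplateMetaDataUpgrader and a logger is in scope; the resource path, template name, and version property are illustrative only:

    @Override
    public UnaryOperator<Map<String, IndexTemplateMetaData>> getIndexTemplateMetaDataUpgrader() {
        return templates -> {
            // mutate the map in place: add or overwrite the bundled template
            TemplateUtils.loadTemplateIntoMap("/my-template.json", templates, "my-template",
                    Version.CURRENT.toString(), "my.template.version", logger);
            return templates;
        };
    }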
/**
* Loads a built-in template and returns its source.
*/
@ -89,6 +105,20 @@ public class TemplateUtils {
.replaceAll(version);
}
/**
* Checks if a versioned template exists, and if it exists checks if the version is greater than or equal to the current version.
* @param templateName Name of the index template
* @param state Cluster state
*/
public static boolean checkTemplateExistsAndVersionIsGTECurrentVersion(String templateName, ClusterState state) {
IndexTemplateMetaData templateMetaData = state.metaData().templates().get(templateName);
if (templateMetaData == null) {
return false;
}
return templateMetaData.version() != null && templateMetaData.version() >= Version.CURRENT.id;
}
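
An illustrative guard built on the new check, as a template registry might use it before installing a newer bundled template; putTemplate is a hypothetical helper:

    if (TemplateUtils.checkTemplateExistsAndVersionIsGTECurrentVersion("my-template", clusterState) == false) {
        // the installed template is missing or older than this node's version
        putTemplate("my-template");
    }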
/**
* Checks if a versioned template exists, and if it exists checks if it is up-to-date with current version.
* @param versionKey The property in the mapping's _meta field which stores the version info


@ -5,8 +5,8 @@
*/
package org.elasticsearch.xpack.upgrade;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.xpack.security.InternalClient;
import java.util.Collection;
import java.util.Collections;
@ -30,6 +30,6 @@ public interface IndexUpgradeCheckFactory {
* <p>
* This method is called from {@link org.elasticsearch.plugins.Plugin#createComponents} method.
*/
IndexUpgradeCheck createCheck(InternalClient internalClient, ClusterService clusterService);
IndexUpgradeCheck createCheck(Client client, ClusterService clusterService);
}


@ -24,8 +24,6 @@ import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeAction;
import org.elasticsearch.xpack.upgrade.actions.IndexUpgradeInfoAction;
import org.elasticsearch.xpack.upgrade.rest.RestIndexUpgradeAction;
@ -48,7 +46,7 @@ public class Upgrade implements ActionPlugin {
private static final int EXPECTED_INDEX_FORMAT_VERSION = 6;
private final Settings settings;
private final List<BiFunction<InternalClient, ClusterService, IndexUpgradeCheck>> upgradeCheckFactories;
private final List<BiFunction<Client, ClusterService, IndexUpgradeCheck>> upgradeCheckFactories;
public Upgrade(Settings settings) {
this.settings = settings;
@ -58,10 +56,9 @@ public class Upgrade implements ActionPlugin {
public Collection<Object> createComponents(Client client, ClusterService clusterService, ThreadPool threadPool,
ResourceWatcherService resourceWatcherService, ScriptService scriptService,
NamedXContentRegistry xContentRegistry) {
final InternalSecurityClient internalSecurityClient = new InternalSecurityClient(settings, threadPool, client);
List<IndexUpgradeCheck> upgradeChecks = new ArrayList<>(upgradeCheckFactories.size());
for (BiFunction<InternalClient, ClusterService, IndexUpgradeCheck> checkFactory : upgradeCheckFactories) {
upgradeChecks.add(checkFactory.apply(internalSecurityClient, clusterService));
for (BiFunction<Client, ClusterService, IndexUpgradeCheck> checkFactory : upgradeCheckFactories) {
upgradeChecks.add(checkFactory.apply(client, clusterService));
}
return Collections.singletonList(new IndexUpgradeService(settings, Collections.unmodifiableList(upgradeChecks)));
}


@ -9,6 +9,7 @@ import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.bootstrap.BootstrapCheck;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
@ -50,7 +51,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.XPackFeatureSet;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.ssl.SSLService;
import org.elasticsearch.xpack.watcher.actions.ActionFactory;
import org.elasticsearch.xpack.watcher.actions.ActionRegistry;
@ -70,7 +70,6 @@ import org.elasticsearch.xpack.watcher.actions.slack.SlackAction;
import org.elasticsearch.xpack.watcher.actions.slack.SlackActionFactory;
import org.elasticsearch.xpack.watcher.actions.webhook.WebhookAction;
import org.elasticsearch.xpack.watcher.actions.webhook.WebhookActionFactory;
import org.elasticsearch.xpack.watcher.client.WatcherClient;
import org.elasticsearch.xpack.watcher.common.http.HttpClient;
import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate;
import org.elasticsearch.xpack.watcher.common.http.HttpSettings;
@ -245,7 +244,7 @@ public class Watcher implements ActionPlugin {
}
}
public Collection<Object> createComponents(Clock clock, ScriptService scriptService, InternalClient internalClient,
public Collection<Object> createComponents(Clock clock, ScriptService scriptService, Client client,
XPackLicenseState licenseState,
ThreadPool threadPool, ClusterService clusterService,
NamedXContentRegistry xContentRegistry, SSLService sslService) {
@ -260,6 +259,8 @@ public class Watcher implements ActionPlugin {
throw new UncheckedIOException(e);
}
new WatcherIndexTemplateRegistry(settings, clusterService, threadPool, client);
// http client
Map<String, HttpAuthFactory> httpAuthFactories = new HashMap<>();
httpAuthFactories.put(BasicAuth.TYPE, new BasicAuthFactory(cryptoService));
@ -295,14 +296,14 @@ public class Watcher implements ActionPlugin {
final ConditionRegistry conditionRegistry = new ConditionRegistry(Collections.unmodifiableMap(parsers), clock);
final Map<String, TransformFactory> transformFactories = new HashMap<>();
transformFactories.put(ScriptTransform.TYPE, new ScriptTransformFactory(settings, scriptService));
transformFactories.put(SearchTransform.TYPE, new SearchTransformFactory(settings, internalClient, xContentRegistry, scriptService));
transformFactories.put(SearchTransform.TYPE, new SearchTransformFactory(settings, client, xContentRegistry, scriptService));
final TransformRegistry transformRegistry = new TransformRegistry(settings, Collections.unmodifiableMap(transformFactories));
// actions
final Map<String, ActionFactory> actionFactoryMap = new HashMap<>();
actionFactoryMap.put(EmailAction.TYPE, new EmailActionFactory(settings, emailService, templateEngine, emailAttachmentsParser));
actionFactoryMap.put(WebhookAction.TYPE, new WebhookActionFactory(settings, httpClient, httpTemplateParser, templateEngine));
actionFactoryMap.put(IndexAction.TYPE, new IndexActionFactory(settings, internalClient));
actionFactoryMap.put(IndexAction.TYPE, new IndexActionFactory(settings, client));
actionFactoryMap.put(LoggingAction.TYPE, new LoggingActionFactory(settings, templateEngine));
actionFactoryMap.put(HipChatAction.TYPE, new HipChatActionFactory(settings, templateEngine, hipChatService));
actionFactoryMap.put(JiraAction.TYPE, new JiraActionFactory(settings, templateEngine, jiraService));
@ -313,16 +314,14 @@ public class Watcher implements ActionPlugin {
// inputs
final Map<String, InputFactory> inputFactories = new HashMap<>();
inputFactories.put(SearchInput.TYPE,
new SearchInputFactory(settings, internalClient, xContentRegistry, scriptService));
new SearchInputFactory(settings, client, xContentRegistry, scriptService));
inputFactories.put(SimpleInput.TYPE, new SimpleInputFactory(settings));
inputFactories.put(HttpInput.TYPE, new HttpInputFactory(settings, httpClient, templateEngine, httpTemplateParser));
inputFactories.put(NoneInput.TYPE, new NoneInputFactory(settings));
final InputRegistry inputRegistry = new InputRegistry(settings, inputFactories);
inputFactories.put(ChainInput.TYPE, new ChainInputFactory(settings, inputRegistry));
final WatcherClient watcherClient = new WatcherClient(internalClient);
final HistoryStore historyStore = new HistoryStore(settings, internalClient);
final HistoryStore historyStore = new HistoryStore(settings, client);
// schedulers
final Set<Schedule.Parser> scheduleParsers = new HashSet<>();
@ -344,7 +343,7 @@ public class Watcher implements ActionPlugin {
final TriggerService triggerService = new TriggerService(settings, triggerEngines);
final TriggeredWatch.Parser triggeredWatchParser = new TriggeredWatch.Parser(settings, triggerService);
final TriggeredWatchStore triggeredWatchStore = new TriggeredWatchStore(settings, internalClient, triggeredWatchParser);
final TriggeredWatchStore triggeredWatchStore = new TriggeredWatchStore(settings, client, triggeredWatchParser);
final WatcherSearchTemplateService watcherSearchTemplateService =
new WatcherSearchTemplateService(settings, scriptService, xContentRegistry);
@ -352,16 +351,13 @@ public class Watcher implements ActionPlugin {
final Watch.Parser watchParser = new Watch.Parser(settings, triggerService, registry, inputRegistry, cryptoService, clock);
final ExecutionService executionService = new ExecutionService(settings, historyStore, triggeredWatchStore, watchExecutor,
clock, watchParser, clusterService, internalClient);
clock, watchParser, clusterService, client);
final Consumer<Iterable<TriggerEvent>> triggerEngineListener = getTriggerEngineListener(executionService);
triggerService.register(triggerEngineListener);
final WatcherIndexTemplateRegistry watcherIndexTemplateRegistry = new WatcherIndexTemplateRegistry(settings, clusterService,
threadPool, internalClient);
WatcherService watcherService = new WatcherService(settings, triggerService, triggeredWatchStore, executionService,
watchParser, internalClient);
watchParser, client);
final WatcherLifeCycleService watcherLifeCycleService =
new WatcherLifeCycleService(settings, threadPool, clusterService, watcherService);
@ -369,10 +365,9 @@ public class Watcher implements ActionPlugin {
listener = new WatcherIndexingListener(settings, watchParser, clock, triggerService);
clusterService.addListener(listener);
return Arrays.asList(registry, watcherClient, inputRegistry, historyStore, triggerService, triggeredWatchParser,
return Arrays.asList(registry, inputRegistry, historyStore, triggerService, triggeredWatchParser,
watcherLifeCycleService, executionService, triggerEngineListener, watcherService, watchParser,
configuredTriggerEngine, triggeredWatchStore, watcherSearchTemplateService, watcherIndexTemplateRegistry,
slackService, pagerDutyService, hipChatService);
configuredTriggerEngine, triggeredWatchStore, watcherSearchTemplateService, slackService, pagerDutyService, hipChatService);
}
protected TriggerEngine getTriggerEngine(Clock clock, ScheduleRegistry scheduleRegistry) {


@ -26,11 +26,11 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.execution.ExecutionService;
import org.elasticsearch.xpack.watcher.execution.TriggeredWatch;
import org.elasticsearch.xpack.watcher.execution.TriggeredWatchStore;
@ -51,6 +51,8 @@ import java.util.stream.Collectors;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.watcher.support.Exceptions.illegalState;
import static org.elasticsearch.xpack.watcher.watch.Watch.INDEX;
@ -69,7 +71,7 @@ public class WatcherService extends AbstractComponent {
private final TimeValue defaultSearchTimeout;
public WatcherService(Settings settings, TriggerService triggerService, TriggeredWatchStore triggeredWatchStore,
ExecutionService executionService, Watch.Parser parser, InternalClient client) {
ExecutionService executionService, Watch.Parser parser, Client client) {
super(settings);
this.triggerService = triggerService;
this.triggeredWatchStore = triggeredWatchStore;
@ -200,6 +202,9 @@ public class WatcherService extends AbstractComponent {
return Collections.emptyList();
}
SearchResponse response = null;
List<Watch> watches = new ArrayList<>();
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
RefreshResponse refreshResponse = client.admin().indices().refresh(new RefreshRequest(INDEX))
.actionGet(TimeValue.timeValueSeconds(5));
if (refreshResponse.getSuccessfulShards() < indexMetaData.getNumberOfShards()) {
@ -218,8 +223,6 @@ public class WatcherService extends AbstractComponent {
// find out all allocation ids
List<ShardRouting> watchIndexShardRoutings = clusterState.getRoutingTable().allShards(watchIndexName);
List<Watch> watches = new ArrayList<>();
SearchRequest searchRequest = new SearchRequest(INDEX)
.scroll(scrollTimeout)
.preference(Preference.ONLY_LOCAL.toString())
@ -227,8 +230,8 @@ public class WatcherService extends AbstractComponent {
.size(scrollSize)
.sort(SortBuilders.fieldSort("_doc"))
.version(true));
SearchResponse response = client.search(searchRequest).actionGet(defaultSearchTimeout);
try {
response = client.search(searchRequest).actionGet(defaultSearchTimeout);
if (response.getTotalShards() != response.getSuccessfulShards()) {
throw new ElasticsearchException("Partial response while loading watches");
}
@ -283,10 +286,14 @@ public class WatcherService extends AbstractComponent {
response = client.searchScroll(request).actionGet(defaultSearchTimeout);
}
} finally {
if (response != null) {
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
clearScrollRequest.addScrollId(response.getScrollId());
client.clearScroll(clearScrollRequest).actionGet(scrollTimeout);
}
}
}
logger.debug("Loaded [{}] watches for execution", watches.size());


@ -13,9 +13,9 @@ import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.watcher.actions.Action;
@ -35,6 +35,8 @@ import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.watcher.support.Exceptions.illegalState;
public class ExecutableIndexAction extends ExecutableAction<IndexAction> {
@ -103,7 +105,9 @@ public class ExecutableIndexAction extends ExecutableAction<IndexAction> {
XContentType.JSON));
}
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
response = client.index(indexRequest).get(indexDefaultTimeout.millis(), TimeUnit.MILLISECONDS);
}
try (XContentBuilder builder = jsonBuilder()) {
indexResponseToXContent(builder, response);
bytesReference = builder.bytes();
@ -136,6 +140,7 @@ public class ExecutableIndexAction extends ExecutableAction<IndexAction> {
}
bulkRequest.add(indexRequest);
}
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
BulkResponse bulkResponse = client.bulk(bulkRequest).get(bulkDefaultTimeout.millis(), TimeUnit.MILLISECONDS);
try (XContentBuilder jsonBuilder = jsonBuilder().startArray()) {
for (BulkItemResponse item : bulkResponse) {
@ -154,6 +159,7 @@ public class ExecutableIndexAction extends ExecutableAction<IndexAction> {
}
}
}
}
private Map<String, Object> addTimestampToDocument(Map<String, Object> data, DateTime executionTime) {
if (action.executionTimeField != null) {


@ -28,6 +28,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
@ -59,6 +60,8 @@ import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.joda.time.DateTimeZone.UTC;
public class ExecutionService extends AbstractComponent {
@ -355,11 +358,12 @@ public class ExecutionService extends AbstractComponent {
UpdateRequest updateRequest = new UpdateRequest(Watch.INDEX, Watch.DOC_TYPE, watch.id());
updateRequest.doc(source);
updateRequest.version(watch.version());
try {
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
client.update(updateRequest).actionGet(indexDefaultTimeout);
} catch (DocumentMissingException e) {
// do not rethrow this exception, otherwise the watch history will contain an exception
// even though the execution might have been fine
// TODO should we really just drop this exception on the floor?
}
}
@ -505,11 +509,13 @@ public class ExecutionService extends AbstractComponent {
* @return The GetResponse of calling the get API of this watch
*/
private GetResponse getWatch(String id) {
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, id).preference(Preference.LOCAL.type()).realtime(true);
PlainActionFuture<GetResponse> future = PlainActionFuture.newFuture();
client.get(getRequest, future);
return future.actionGet();
}
}
public Map<String, Object> usageStats() {
Counters counters = new Counters();


@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.Preference;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@ -45,6 +46,9 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.watcher.support.Exceptions.illegalState;
public class TriggeredWatchStore extends AbstractComponent {
@ -107,7 +111,8 @@ public class TriggeredWatchStore extends AbstractComponent {
}
ensureStarted();
client.bulk(createBulkRequest(triggeredWatches, DOC_TYPE), listener);
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, createBulkRequest(triggeredWatches, DOC_TYPE),
listener, client::bulk);
}
public BulkResponse putAll(final List<TriggeredWatch> triggeredWatches) throws IOException {
@ -140,7 +145,9 @@ public class TriggeredWatchStore extends AbstractComponent {
public void delete(Wid wid) {
ensureStarted();
DeleteRequest request = new DeleteRequest(INDEX_NAME, DOC_TYPE, wid.value());
client.delete(request);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
client.delete(request); // FIXME shouldn't we wait before saying the delete was successful
}
logger.trace("successfully deleted triggered watch with id [{}]", wid);
}
@ -170,7 +177,7 @@ public class TriggeredWatchStore extends AbstractComponent {
return Collections.emptyList();
}
try {
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
client.admin().indices().refresh(new RefreshRequest(TriggeredWatchStore.INDEX_NAME)).actionGet(TimeValue.timeValueSeconds(5));
} catch (IndexNotFoundException e) {
return Collections.emptyList();
@ -187,9 +194,10 @@ public class TriggeredWatchStore extends AbstractComponent {
.sort(SortBuilders.fieldSort("_doc"))
.version(true));
SearchResponse response = client.search(searchRequest).actionGet(defaultSearchTimeout);
SearchResponse response = null;
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
response = client.search(searchRequest).actionGet(defaultSearchTimeout);
logger.debug("trying to find triggered watches for ids {}: found [{}] docs", ids, response.getHits().getTotalHits());
try {
while (response.getHits().getHits().length != 0) {
for (SearchHit hit : response.getHits()) {
Wid wid = new Wid(hit.getId());
@ -203,10 +211,14 @@ public class TriggeredWatchStore extends AbstractComponent {
response = client.searchScroll(request).actionGet(defaultSearchTimeout);
}
} finally {
if (response != null) {
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
clearScrollRequest.addScrollId(response.getScrollId());
client.clearScroll(clearScrollRequest).actionGet(scrollTimeout);
}
}
}
return triggeredWatches;
}
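For the listener-based calls the stash/restore is folded into a single helper: executeAsyncWithOrigin stashes the origin before delegating to the supplied client method and, per the ContextPreservingActionListener import in ClientHelper, wraps the listener so its callbacks run with the caller's original context. A sketch using the bulk call from putAll above:

    // sketch: async bulk executed with the watcher origin; client::bulk is the delegate
    executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN,
            createBulkRequest(triggeredWatches, DOC_TYPE), listener, client::bulk);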


@ -13,6 +13,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.engine.VersionConflictEngineException;
@ -34,6 +35,8 @@ import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.watcher.support.Exceptions.ioException;
public class HistoryStore extends AbstractComponent {
@ -79,7 +82,8 @@ public class HistoryStore extends AbstractComponent {
}
String index = getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime());
putUpdateLock.lock();
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
try (XContentBuilder builder = XContentFactory.jsonBuilder();
ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
watchRecord.toXContent(builder, WatcherParams.builder().hideSecrets(true).build());
IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value())
@ -105,7 +109,8 @@ public class HistoryStore extends AbstractComponent {
String index = getHistoryIndexNameForTime(watchRecord.triggerEvent().triggeredTime());
putUpdateLock.lock();
try {
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
try (XContentBuilder builder = XContentFactory.jsonBuilder();
ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
watchRecord.toXContent(builder, WatcherParams.builder().hideSecrets(true).build());
IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value())
@ -116,7 +121,8 @@ public class HistoryStore extends AbstractComponent {
} catch (VersionConflictEngineException vcee) {
watchRecord = new WatchRecord.MessageWatchRecord(watchRecord, ExecutionState.EXECUTED_MULTIPLE_TIMES,
"watch record [{ " + watchRecord.id() + " }] has been stored before, previous state [" + watchRecord.state() + "]");
try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder()) {
try (XContentBuilder xContentBuilder = XContentFactory.jsonBuilder();
ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value())
.source(xContentBuilder.value(watchRecord));
client.index(request).get(30, TimeUnit.SECONDS);
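HistoryStore combines the origin stash with the XContentBuilder in a single try-with-resources, so both resources are closed (in reverse order) when the block exits. A condensed sketch of the combined form, under the assumption that index, watchRecord, and client carry the same types as above:

    // sketch: builder and stored context opened together, closed in reverse order on exit
    try (XContentBuilder builder = XContentFactory.jsonBuilder();
         ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
        watchRecord.toXContent(builder, WatcherParams.builder().hideSecrets(true).build());
        IndexRequest request = new IndexRequest(index, DOC_TYPE, watchRecord.id().value()).source(builder);
        client.index(request).get(30, TimeUnit.SECONDS);
    }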


@ -12,6 +12,7 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
@ -27,6 +28,8 @@ import org.elasticsearch.xpack.watcher.watch.Payload;
import java.util.Map;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;
import static org.elasticsearch.xpack.watcher.input.search.SearchInput.TYPE;
/**
@ -67,7 +70,10 @@ public class ExecutableSearchInput extends ExecutableInput<SearchInput, SearchIn
logger.trace("[{}] running query for [{}] [{}]", ctx.id(), ctx.watch().id(), request.getSearchSource().utf8ToString());
}
SearchResponse response = client.search(searchTemplateService.toSearchRequest(request)).actionGet(timeout);
SearchResponse response;
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) {
response = client.search(searchTemplateService.toSearchRequest(request)).actionGet(timeout);
}
if (logger.isDebugEnabled()) {
logger.debug("[{}] found [{}] hits", ctx.id(), response.getHits().getTotalHits());


@ -9,6 +9,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
@ -20,7 +21,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.template.TemplateUtils;
import java.nio.charset.StandardCharsets;
@ -30,6 +30,9 @@ import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Pattern;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
public class WatcherIndexTemplateRegistry extends AbstractComponent implements ClusterStateListener {
// history (please add a comment why you increased the version here)
@ -56,12 +59,12 @@ public class WatcherIndexTemplateRegistry extends AbstractComponent implements C
TEMPLATE_CONFIG_TRIGGERED_WATCHES, TEMPLATE_CONFIG_WATCH_HISTORY, TEMPLATE_CONFIG_WATCHES
};
private final InternalClient client;
private final Client client;
private final ThreadPool threadPool;
private final TemplateConfig[] indexTemplates;
private final ConcurrentMap<String, AtomicBoolean> templateCreationsInProgress = new ConcurrentHashMap<>();
public WatcherIndexTemplateRegistry(Settings settings, ClusterService clusterService, ThreadPool threadPool, InternalClient client) {
public WatcherIndexTemplateRegistry(Settings settings, ClusterService clusterService, ThreadPool threadPool, Client client) {
super(settings);
this.client = client;
this.threadPool = threadPool;
@ -112,7 +115,8 @@ public class WatcherIndexTemplateRegistry extends AbstractComponent implements C
PutIndexTemplateRequest request = new PutIndexTemplateRequest(templateName).source(config.load(), XContentType.JSON);
request.masterNodeTimeout(TimeValue.timeValueMinutes(1));
client.admin().indices().putTemplate(request, new ActionListener<PutIndexTemplateResponse>() {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, request,
new ActionListener<PutIndexTemplateResponse>() {
@Override
public void onResponse(PutIndexTemplateResponse response) {
creationCheck.set(false);
@ -126,7 +130,7 @@ public class WatcherIndexTemplateRegistry extends AbstractComponent implements C
creationCheck.set(false);
logger.error(new ParameterizedMessage("Error adding watcher template [{}]", templateName), e);
}
});
}, client.admin().indices()::putTemplate);
});
}


@ -8,9 +8,11 @@ package org.elasticsearch.xpack.watcher.transport.actions.ack;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.Preference;
@ -22,7 +24,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.actions.ActionWrapper;
import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction;
import org.elasticsearch.xpack.watcher.watch.Watch;
@ -33,6 +34,8 @@ import java.util.Arrays;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.joda.time.DateTimeZone.UTC;
public class TransportAckWatchAction extends WatcherTransportAction<AckWatchRequest, AckWatchResponse> {
@ -44,7 +47,7 @@ public class TransportAckWatchAction extends WatcherTransportAction<AckWatchRequ
@Inject
public TransportAckWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Clock clock, XPackLicenseState licenseState,
Watch.Parser parser, InternalClient client) {
Watch.Parser parser, Client client) {
super(settings, AckWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
licenseState, AckWatchRequest::new);
this.clock = clock;
@ -57,12 +60,14 @@ public class TransportAckWatchAction extends WatcherTransportAction<AckWatchRequ
GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId())
.preference(Preference.LOCAL.type()).realtime(true);
client.get(getRequest, ActionListener.wrap((response) -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest,
ActionListener.<GetResponse>wrap((response) -> {
if (response.isExists() == false) {
listener.onFailure(new ResourceNotFoundException("Watch with id [{}] does not exist", request.getWatchId()));
} else {
DateTime now = new DateTime(clock.millis(), UTC);
Watch watch = parser.parseWithSecrets(request.getWatchId(), true, response.getSourceAsBytesRef(), now, XContentType.JSON);
Watch watch =
parser.parseWithSecrets(request.getWatchId(), true, response.getSourceAsBytesRef(), now, XContentType.JSON);
watch.version(response.getVersion());
watch.status().version(response.getVersion());
String[] actionIds = request.getActionIds();
@ -99,10 +104,11 @@ public class TransportAckWatchAction extends WatcherTransportAction<AckWatchRequ
builder.endObject().endObject().endObject();
updateRequest.doc(builder);
client.update(updateRequest, ActionListener.wrap(
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, updateRequest,
ActionListener.<UpdateResponse>wrap(
(updateResponse) -> listener.onResponse(new AckWatchResponse(watch.status())),
listener::onFailure));
listener::onFailure), client::update);
}
}, listener::onFailure));
}, listener::onFailure), client::get);
}
}


@ -8,9 +8,11 @@ package org.elasticsearch.xpack.watcher.transport.actions.activate;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.Preference;
@ -21,7 +23,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction;
import org.elasticsearch.xpack.watcher.watch.Watch;
import org.elasticsearch.xpack.watcher.watch.WatchStatus;
@ -31,6 +32,8 @@ import java.io.IOException;
import java.time.Clock;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.elasticsearch.xpack.watcher.support.WatcherDateTimeUtils.writeDate;
import static org.joda.time.DateTimeZone.UTC;
@ -46,7 +49,7 @@ public class TransportActivateWatchAction extends WatcherTransportAction<Activat
@Inject
public TransportActivateWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Clock clock,
XPackLicenseState licenseState, Watch.Parser parser, InternalClient client) {
XPackLicenseState licenseState, Watch.Parser parser, Client client) {
super(settings, ActivateWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
licenseState, ActivateWatchRequest::new);
this.clock = clock;
@ -67,10 +70,13 @@ public class TransportActivateWatchAction extends WatcherTransportAction<Activat
// once per second?
updateRequest.retryOnConflict(2);
client.update(updateRequest, ActionListener.wrap(updateResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, updateRequest,
ActionListener.<UpdateResponse>wrap(updateResponse -> {
GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getWatchId())
.preference(Preference.LOCAL.type()).realtime(true);
client.get(getRequest, ActionListener.wrap(getResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest,
ActionListener.<GetResponse>wrap(getResponse -> {
if (getResponse.isExists()) {
Watch watch = parser.parseWithSecrets(request.getWatchId(), true, getResponse.getSourceAsBytesRef(), now,
XContentType.JSON);
@ -78,10 +84,11 @@ public class TransportActivateWatchAction extends WatcherTransportAction<Activat
watch.status().version(getResponse.getVersion());
listener.onResponse(new ActivateWatchResponse(watch.status()));
} else {
listener.onFailure(new ResourceNotFoundException("Watch with id [{}] does not exist", request.getWatchId()));
listener.onFailure(new ResourceNotFoundException("Watch with id [{}] does not exist",
request.getWatchId()));
}
}, listener::onFailure));
}, listener::onFailure));
}, listener::onFailure), client::get);
}, listener::onFailure), client::update);
} catch (IOException e) {
listener.onFailure(e);
}


@ -8,6 +8,7 @@ package org.elasticsearch.xpack.watcher.transport.actions.delete;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.WriteRequest;
@ -17,9 +18,11 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.watch.Watch;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
/**
* Performs the delete operation. This inherits directly from HandledTransportAction, because deletion should always work
* independently from the license check in WatcherTransportAction!
@ -31,7 +34,7 @@ public class TransportDeleteWatchAction extends HandledTransportAction<DeleteWat
@Inject
public TransportDeleteWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
InternalClient client) {
Client client) {
super(settings, DeleteWatchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
DeleteWatchRequest::new);
this.client = client;
@ -41,11 +44,11 @@ public class TransportDeleteWatchAction extends HandledTransportAction<DeleteWat
protected void doExecute(DeleteWatchRequest request, ActionListener<DeleteWatchResponse> listener) {
DeleteRequest deleteRequest = new DeleteRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId());
deleteRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
client.delete(deleteRequest, ActionListener.wrap(deleteResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, deleteRequest,
ActionListener.<DeleteResponse>wrap(deleteResponse -> {
boolean deleted = deleteResponse.getResult() == DocWriteResponse.Result.DELETED;
DeleteWatchResponse response = new DeleteWatchResponse(deleteResponse.getId(), deleteResponse.getVersion(), deleted);
listener.onResponse(response);
},
listener::onFailure));
}, listener::onFailure), client::delete);
}
}


@ -6,10 +6,10 @@
package org.elasticsearch.xpack.watcher.transport.actions.execute;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -23,7 +23,6 @@ import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.condition.AlwaysCondition;
import org.elasticsearch.xpack.watcher.execution.ActionExecutionMode;
import org.elasticsearch.xpack.watcher.execution.ExecutionService;
@ -43,6 +42,8 @@ import java.io.IOException;
import java.time.Clock;
import java.util.Map;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.joda.time.DateTimeZone.UTC;
/**
@ -60,7 +61,7 @@ public class TransportExecuteWatchAction extends WatcherTransportAction<ExecuteW
public TransportExecuteWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ExecutionService executionService, Clock clock, XPackLicenseState licenseState,
Watch.Parser watchParser, InternalClient client, TriggerService triggerService) {
Watch.Parser watchParser, Client client, TriggerService triggerService) {
super(settings, ExecuteWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
licenseState, ExecuteWatchRequest::new);
this.executionService = executionService;
@ -76,16 +77,18 @@ public class TransportExecuteWatchAction extends WatcherTransportAction<ExecuteW
GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId())
.preference(Preference.LOCAL.type()).realtime(true);
client.get(getRequest, ActionListener.wrap(response -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest,
ActionListener.<GetResponse>wrap(response -> {
if (response.isExists()) {
Watch watch = watchParser.parse(request.getId(), true, response.getSourceAsBytesRef(), request.getXContentType());
Watch watch =
watchParser.parse(request.getId(), true, response.getSourceAsBytesRef(), request.getXContentType());
watch.version(response.getVersion());
watch.status().version(response.getVersion());
executeWatch(request, listener, watch, true);
} else {
listener.onFailure(new ResourceNotFoundException("Watch with id [{}] does not exist", request.getId()));
}
}, listener::onFailure));
}, listener::onFailure), client::get);
} else if (request.getWatchSource() != null) {
try {
assert !request.isRecordExecution();
@ -93,7 +96,7 @@ public class TransportExecuteWatchAction extends WatcherTransportAction<ExecuteW
request.getXContentType());
executeWatch(request, listener, watch, false);
} catch (IOException e) {
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to parse [{}]", request.getId()), e);
logger.error(new ParameterizedMessage("failed to parse [{}]", request.getId()), e);
listener.onFailure(e);
}
} else {


@ -7,6 +7,7 @@ package org.elasticsearch.xpack.watcher.transport.actions.get;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -19,7 +20,6 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.support.xcontent.WatcherParams;
import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction;
import org.elasticsearch.xpack.watcher.watch.Watch;
@ -28,6 +28,8 @@ import org.joda.time.DateTime;
import java.time.Clock;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.joda.time.DateTimeZone.UTC;
public class TransportGetWatchAction extends WatcherTransportAction<GetWatchRequest, GetWatchResponse> {
@ -39,7 +41,7 @@ public class TransportGetWatchAction extends WatcherTransportAction<GetWatchRequ
@Inject
public TransportGetWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, XPackLicenseState licenseState,
Watch.Parser parser, Clock clock, InternalClient client) {
Watch.Parser parser, Clock clock, Client client) {
super(settings, GetWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
licenseState, GetWatchRequest::new);
this.parser = parser;
@ -52,14 +54,16 @@ public class TransportGetWatchAction extends WatcherTransportAction<GetWatchRequ
GetRequest getRequest = new GetRequest(Watch.INDEX, Watch.DOC_TYPE, request.getId())
.preference(Preference.LOCAL.type()).realtime(true);
client.get(getRequest, ActionListener.wrap(getResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, getRequest,
ActionListener.<GetResponse>wrap(getResponse -> {
if (getResponse.isExists()) {
try (XContentBuilder builder = jsonBuilder()) {
// When we return the watch via the Get Watch REST API, we want to return the watch as was specified in the put api,
// we don't include the status in the watch source itself, but as a separate top level field, so that
// it indicates the the status is managed by watcher itself.
// When we return the watch via the Get Watch REST API, we want to return the watch as was specified in
// the put api, we don't include the status in the watch source itself, but as a separate top level field,
// so that it indicates the status is managed by watcher itself.
DateTime now = new DateTime(clock.millis(), UTC);
Watch watch = parser.parseWithSecrets(request.getId(), true, getResponse.getSourceAsBytesRef(), now, XContentType.JSON);
Watch watch = parser.parseWithSecrets(request.getId(), true, getResponse.getSourceAsBytesRef(), now,
XContentType.JSON);
watch.toXContent(builder, WatcherParams.builder()
.hideSecrets(true)
.put(Watch.INCLUDE_STATUS_KEY, false)
@ -72,12 +76,13 @@ public class TransportGetWatchAction extends WatcherTransportAction<GetWatchRequ
listener.onResponse(new GetWatchResponse(request.getId()));
}
}, e -> {
// special case. This API should not care if the index is missing or not, it should respond with the watch not being found
// special case. This API should not care if the index is missing or not,
// it should respond with the watch not being found
if (e instanceof IndexNotFoundException) {
listener.onResponse(new GetWatchResponse(request.getId()));
} else {
listener.onFailure(e);
}
}));
}), client::get);
}
}


@ -8,8 +8,10 @@ package org.elasticsearch.xpack.watcher.transport.actions.put;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
@ -19,7 +21,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.watcher.support.xcontent.WatcherParams;
import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction;
import org.elasticsearch.xpack.watcher.watch.Payload;
@ -29,18 +30,20 @@ import org.joda.time.DateTime;
import java.time.Clock;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.ClientHelper.WATCHER_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;
import static org.joda.time.DateTimeZone.UTC;
public class TransportPutWatchAction extends WatcherTransportAction<PutWatchRequest, PutWatchResponse> {
private final Clock clock;
private final Watch.Parser parser;
private final InternalClient client;
private final Client client;
@Inject
public TransportPutWatchAction(Settings settings, TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Clock clock, XPackLicenseState licenseState,
Watch.Parser parser, InternalClient client) {
Watch.Parser parser, Client client) {
super(settings, PutWatchAction.NAME, transportService, threadPool, actionFilters, indexNameExpressionResolver,
licenseState, PutWatchRequest::new);
this.clock = clock;
@ -64,10 +67,12 @@ public class TransportPutWatchAction extends WatcherTransportAction<PutWatchRequ
indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
indexRequest.source(bytesReference, XContentType.JSON);
client.index(indexRequest, ActionListener.wrap(indexResponse -> {
executeAsyncWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN, indexRequest,
ActionListener.<IndexResponse>wrap(indexResponse -> {
boolean created = indexResponse.getResult() == DocWriteResponse.Result.CREATED;
listener.onResponse(new PutWatchResponse(indexResponse.getId(), indexResponse.getVersion(), created));
}, listener::onFailure));
}, listener::onFailure),
client::index);
}
} catch (Exception e) {
listener.onFailure(e);


@ -1,5 +1,5 @@
{
"index_patterns" : ".logstash",
"index_patterns" : [ ".logstash" ],
"settings": {
"index": {
"number_of_shards": 1,


@ -1,5 +1,5 @@
{
"index_patterns": ".monitoring-alerts-${monitoring.template.version}",
"index_patterns": [ ".monitoring-alerts-${monitoring.template.version}" ],
"version": 7000001,
"settings": {
"index": {


@ -1,5 +1,5 @@
{
"index_patterns": ".monitoring-beats-${monitoring.template.version}-*",
"index_patterns": [ ".monitoring-beats-${monitoring.template.version}-*" ],
"version": 7000001,
"settings": {
"index.number_of_shards": 1,


@ -1,5 +1,5 @@
{
"index_patterns": ".monitoring-es-${monitoring.template.version}-*",
"index_patterns": [ ".monitoring-es-${monitoring.template.version}-*" ],
"version": 7000001,
"settings": {
"index.number_of_shards": 1,


@ -1,5 +1,5 @@
{
"index_patterns": ".monitoring-kibana-${monitoring.template.version}-*",
"index_patterns": [ ".monitoring-kibana-${monitoring.template.version}-*" ],
"version": 7000001,
"settings": {
"index.number_of_shards": 1,


@ -1,5 +1,5 @@
{
"index_patterns": ".monitoring-logstash-${monitoring.template.version}-*",
"index_patterns": [ ".monitoring-logstash-${monitoring.template.version}-*" ],
"version": 7000001,
"settings": {
"index.number_of_shards": 1,


@ -1,5 +1,5 @@
{
"index_patterns": ".triggered_watches*",
"index_patterns": [ ".triggered_watches*" ],
"order": 2147483647,
"settings": {
"index.number_of_shards": 1,


@ -1,5 +1,5 @@
{
"index_patterns": ".watcher-history-${xpack.watcher.template.version}*",
"index_patterns": [ ".watcher-history-${xpack.watcher.template.version}*" ],
"order": 2147483647,
"settings": {
"xpack.watcher.template.version": "${xpack.watcher.template.version}",


@ -1,5 +1,5 @@
{
"index_patterns": ".watches*",
"index_patterns": [ ".watches*" ],
"order": 2147483647,
"settings": {
"index.number_of_shards": 1,


@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.Index;
import org.elasticsearch.plugins.Plugin;
@ -32,9 +33,8 @@ import org.elasticsearch.xpack.XPackClient;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.security.InternalSecurityClient;
import org.elasticsearch.xpack.security.Security;
import org.elasticsearch.xpack.security.authc.support.UsernamePasswordToken;
import org.elasticsearch.xpack.security.client.SecurityClient;
import org.junit.AfterClass;
import org.junit.Before;
@ -419,18 +419,6 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase {
return client -> (client instanceof NodeClient) ? client.filterWithHeader(headers) : client;
}
protected InternalClient internalClient() {
return internalCluster().getInstance(InternalClient.class);
}
protected InternalSecurityClient internalSecurityClient() {
return internalSecurityClient(client());
}
protected InternalSecurityClient internalSecurityClient(Client client) {
return new InternalSecurityClient(client.settings(), client.threadPool(), client);
}
protected SecurityClient securityClient() {
return securityClient(client());
}
@ -493,15 +481,17 @@ public abstract class SecurityIntegTestCase extends ESIntegTestCase {
}
protected void deleteSecurityIndex() {
final InternalSecurityClient securityClient = internalSecurityClient();
final Client client = client().filterWithHeader(Collections.singletonMap("Authorization",
UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER,
SecuritySettingsSource.TEST_PASSWORD_SECURE_STRING)));
GetIndexRequest getIndexRequest = new GetIndexRequest();
getIndexRequest.indices(SECURITY_INDEX_NAME);
getIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
GetIndexResponse getIndexResponse = securityClient.admin().indices().getIndex(getIndexRequest).actionGet();
GetIndexResponse getIndexResponse = client.admin().indices().getIndex(getIndexRequest).actionGet();
if (getIndexResponse.getIndices().length > 0) {
// this is a hack to clean up the .security index since only the XPack user can delete it
// this is a hack to clean up the .security index since only a superuser can delete it
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(getIndexResponse.getIndices());
securityClient.admin().indices().delete(deleteIndexRequest).actionGet();
client.admin().indices().delete(deleteIndexRequest).actionGet();
}
}
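With InternalSecurityClient removed, tests administer the .security index by authenticating as a regular superuser instead of bypassing security. The pattern is an ordinary client with a basic-auth header, as the updated deleteSecurityIndex shows; condensed into a sketch (SECURITY_INDEX_NAME is the constant already used above):

    // sketch: a superuser-authenticated client standing in for the old InternalSecurityClient
    Client superuserClient = client().filterWithHeader(Collections.singletonMap("Authorization",
            UsernamePasswordToken.basicAuthHeaderValue(SecuritySettingsSource.TEST_SUPERUSER,
                    SecuritySettingsSource.TEST_PASSWORD_SECURE_STRING)));
    superuserClient.admin().indices().delete(new DeleteIndexRequest(SECURITY_INDEX_NAME)).actionGet();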


@ -58,17 +58,20 @@ public class SecuritySettingsSource extends ClusterDiscoveryConfiguration.Unicas
public static final SecureString TEST_PASSWORD_SECURE_STRING = new SecureString("x-pack-test-password".toCharArray());
public static final String TEST_PASSWORD_HASHED = new String(Hasher.BCRYPT.hash(new SecureString(TEST_PASSWORD.toCharArray())));
public static final String TEST_ROLE = "user";
public static final String TEST_SUPERUSER = "test_superuser";
public static final String DEFAULT_TRANSPORT_CLIENT_ROLE = "transport_client";
public static final String DEFAULT_TRANSPORT_CLIENT_USER_NAME = "test_trans_client_user";
public static final String CONFIG_STANDARD_USER =
TEST_USER_NAME + ":" + TEST_PASSWORD_HASHED + "\n" +
DEFAULT_TRANSPORT_CLIENT_USER_NAME + ":" + TEST_PASSWORD_HASHED + "\n";
DEFAULT_TRANSPORT_CLIENT_USER_NAME + ":" + TEST_PASSWORD_HASHED + "\n" +
TEST_SUPERUSER + ":" + TEST_PASSWORD_HASHED + "\n";
public static final String CONFIG_STANDARD_USER_ROLES =
TEST_ROLE + ":" + TEST_USER_NAME + "," + DEFAULT_TRANSPORT_CLIENT_USER_NAME + "\n" +
DEFAULT_TRANSPORT_CLIENT_ROLE + ":" + DEFAULT_TRANSPORT_CLIENT_USER_NAME+ "\n";
DEFAULT_TRANSPORT_CLIENT_ROLE + ":" + DEFAULT_TRANSPORT_CLIENT_USER_NAME + "\n" +
"superuser:" + TEST_SUPERUSER + "\n";
public static final String CONFIG_ROLE_ALLOW_ALL =
TEST_ROLE + ":\n" +


@ -0,0 +1,141 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.concurrent.CountDownLatch;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class ClientHelperTests extends ESTestCase {
public void testStashContext() {
final String origin = randomAlphaOfLengthBetween(4, 16);
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final boolean setOtherValues = randomBoolean();
if (setOtherValues) {
threadContext.putTransient("foo", "bar");
threadContext.putHeader("foo", "bar");
}
assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
ThreadContext.StoredContext storedContext = ClientHelper.stashWithOrigin(threadContext, origin);
assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
assertNull(threadContext.getTransient("foo"));
assertNull(threadContext.getTransient("bar"));
storedContext.close();
assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
if (setOtherValues) {
assertEquals("bar", threadContext.getTransient("foo"));
assertEquals("bar", threadContext.getHeader("foo"));
}
}
public void testExecuteAsyncWrapsListener() throws Exception {
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final String headerName = randomAlphaOfLengthBetween(4, 16);
final String headerValue = randomAlphaOfLengthBetween(4, 16);
final String origin = randomAlphaOfLengthBetween(4, 16);
final CountDownLatch latch = new CountDownLatch(2);
final ActionListener<ClusterHealthResponse> listener = ActionListener.wrap(v -> {
assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
assertEquals(headerValue, threadContext.getHeader(headerName));
latch.countDown();
}, e -> fail(e.getMessage()));
final ClusterHealthRequest request = new ClusterHealthRequest();
threadContext.putHeader(headerName, headerValue);
ClientHelper.executeAsyncWithOrigin(threadContext, origin, request, listener, (req, listener1) -> {
assertSame(request, req);
assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
assertNull(threadContext.getHeader(headerName));
latch.countDown();
listener1.onResponse(null);
});
latch.await();
}
public void testExecuteWithClient() throws Exception {
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final Client client = mock(Client.class);
final ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(threadContext);
final String headerName = randomAlphaOfLengthBetween(4, 16);
final String headerValue = randomAlphaOfLengthBetween(4, 16);
final String origin = randomAlphaOfLengthBetween(4, 16);
final CountDownLatch latch = new CountDownLatch(2);
final ActionListener<ClusterHealthResponse> listener = ActionListener.wrap(v -> {
assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
assertEquals(headerValue, threadContext.getHeader(headerName));
latch.countDown();
}, e -> fail(e.getMessage()));
doAnswer(invocationOnMock -> {
assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
assertNull(threadContext.getHeader(headerName));
latch.countDown();
((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null);
return null;
}).when(client).execute(anyObject(), anyObject(), anyObject());
threadContext.putHeader(headerName, headerValue);
ClientHelper.executeAsyncWithOrigin(client, origin, ClusterHealthAction.INSTANCE, new ClusterHealthRequest(), listener);
latch.await();
}
public void testClientWithOrigin() throws Exception {
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final Client client = mock(Client.class);
final ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(threadContext);
when(client.settings()).thenReturn(Settings.EMPTY);
final String headerName = randomAlphaOfLengthBetween(4, 16);
final String headerValue = randomAlphaOfLengthBetween(4, 16);
final String origin = randomAlphaOfLengthBetween(4, 16);
final CountDownLatch latch = new CountDownLatch(2);
final ActionListener<ClusterHealthResponse> listener = ActionListener.wrap(v -> {
assertNull(threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
assertEquals(headerValue, threadContext.getHeader(headerName));
latch.countDown();
}, e -> fail(e.getMessage()));
doAnswer(invocationOnMock -> {
assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME));
assertNull(threadContext.getHeader(headerName));
latch.countDown();
((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null);
return null;
}).when(client).execute(anyObject(), anyObject(), anyObject());
threadContext.putHeader(headerName, headerValue);
Client clientWithOrigin = ClientHelper.clientWithOrigin(client, origin);
clientWithOrigin.execute(null, null, listener);
latch.await();
}
}
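Beyond the static helpers, the last test exercises clientWithOrigin, which wraps a Client (a FilterClient, per the imports in ClientHelper) so every request it sends carries the origin without each call site stashing the context itself. A sketch of how a component might hold such a client, reusing the cluster-health action from the test:

    // sketch: a pre-wrapped client whose requests always carry the given origin
    Client originClient = ClientHelper.clientWithOrigin(client, WATCHER_ORIGIN);
    originClient.execute(ClusterHealthAction.INSTANCE, new ClusterHealthRequest(), listener);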


@ -1,245 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.logstash;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.MockTransportClient;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.template.TemplateUtils;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ExecutorService;
import static org.elasticsearch.mock.orig.Mockito.times;
import static org.elasticsearch.xpack.logstash.LogstashTemplateRegistry.LOGSTASH_INDEX_NAME;
import static org.elasticsearch.xpack.logstash.LogstashTemplateRegistry.LOGSTASH_TEMPLATE_NAME;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class LogstashTemplateRegistryTests extends ESTestCase {
private static final int NUM_LOGSTASH_INDEXES = 1; // .logstash
private InternalClient client;
private ExecutorService executorService;
private TransportClient transportClient;
private ThreadPool threadPool;
private ClusterService clusterService;
private LogstashTemplateRegistry logstashTemplateRegistry;
private static final ClusterState EMPTY_CLUSTER_STATE =
new ClusterState.Builder(new ClusterName("test-cluster")).build();
CopyOnWriteArrayList<ActionListener> listeners;
@Before
public void setup() {
executorService = mock(ExecutorService.class);
threadPool = mock(ThreadPool.class);
clusterService = mock(ClusterService.class);
final ExecutorService executorService = EsExecutors.newDirectExecutorService();
when(threadPool.executor(ThreadPool.Names.GENERIC)).thenReturn(executorService);
transportClient = new MockTransportClient(Settings.EMPTY);
class TestInternalClient extends InternalClient {
TestInternalClient(Client transportClient) {
super(Settings.EMPTY, null, transportClient);
}
@Override
protected <Request extends ActionRequest,
Response extends ActionResponse,
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
void doExecute(Action<Request, Response, RequestBuilder> action, Request request,
ActionListener<Response> listener) {
listeners.add(listener);
}
}
client = new TestInternalClient(transportClient);
listeners = new CopyOnWriteArrayList<>();
logstashTemplateRegistry = new LogstashTemplateRegistry(Settings.EMPTY, clusterService, client);
}
@After
public void stop() throws InterruptedException {
if (transportClient != null) {
transportClient.close();
}
}
public void testAddsListener() throws Exception {
LogstashTemplateRegistry templateRegistry = new LogstashTemplateRegistry(Settings.EMPTY, clusterService, client);
verify(clusterService, times(1)).addListener(templateRegistry);
}
public void testAddTemplatesIfMissing() throws IOException {
ClusterState.Builder clusterStateBuilder = createClusterStateWithTemplate(
"/" + LOGSTASH_TEMPLATE_NAME + ".json"
);
logstashTemplateRegistry.clusterChanged(new ClusterChangedEvent("test-event",
clusterStateBuilder.build(), EMPTY_CLUSTER_STATE));
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(true));
assertThat(listeners, hasSize(0));
}
public void testWrongVersionIndexTemplate_isIdentifiedAsNotUpToDate() throws IOException {
String templateString = "/wrong-version-" + LOGSTASH_TEMPLATE_NAME + ".json";
ClusterState.Builder clusterStateBuilder = createClusterStateWithTemplate(templateString);
logstashTemplateRegistry.clusterChanged(new ClusterChangedEvent("test-event",
clusterStateBuilder.build(), EMPTY_CLUSTER_STATE));
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(false));
assertThat(listeners, hasSize(NUM_LOGSTASH_INDEXES));
}
public void testWrongVersionIndexTemplate_isUpdated() throws IOException {
String templateString = "/wrong-version-" + LOGSTASH_TEMPLATE_NAME + ".json";
ClusterState.Builder clusterStateBuilder = createClusterStateWithTemplate(templateString);
final ClusterState clusterState = clusterStateBuilder.build();
logstashTemplateRegistry.clusterChanged(new ClusterChangedEvent("test-event",
clusterState, EMPTY_CLUSTER_STATE));
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(false));
assertThat(listeners, hasSize(NUM_LOGSTASH_INDEXES));
assertThat("Expected pending template creation", logstashTemplateRegistry.isTemplateCreationPending(), is(true));
// if we do it again this should not send an update
ActionListener listener = listeners.get(0);
listeners.clear();
logstashTemplateRegistry.clusterChanged(new ClusterChangedEvent("test-event",
clusterState, EMPTY_CLUSTER_STATE));
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(false));
assertThat(listeners, hasSize(0));
assertThat("Expected pending template creation", logstashTemplateRegistry.isTemplateCreationPending(), is(true));
// if we now simulate an error...
listener.onFailure(new Exception());
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(false));
assertFalse(logstashTemplateRegistry.isTemplateCreationPending());
// ... we should be able to send a new update
logstashTemplateRegistry.clusterChanged(new ClusterChangedEvent("test-event",
clusterState, EMPTY_CLUSTER_STATE));
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(false));
assertThat(listeners, hasSize(1));
assertThat("Expected pending template creation", logstashTemplateRegistry.isTemplateCreationPending(), is(true));
// now check what happens if we get back an unacknowledged response
listeners.get(0).onResponse(new TestPutIndexTemplateResponse());
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(false));
assertThat("Didn't expect pending template creation", logstashTemplateRegistry.isTemplateCreationPending(), is(false));
// and now let's see what happens if we get back a response
listeners.clear();
logstashTemplateRegistry.clusterChanged(new ClusterChangedEvent("test-event",
clusterState, EMPTY_CLUSTER_STATE));
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(false));
assertThat("Expected pending template creation", logstashTemplateRegistry.isTemplateCreationPending(), is(true));
assertThat(listeners, hasSize(1));
listeners.get(0).onResponse(new TestPutIndexTemplateResponse(true));
assertThat(logstashTemplateRegistry.isTemplateUpToDate(), equalTo(true));
assertThat("Didn't expect pending template creation", logstashTemplateRegistry.isTemplateCreationPending(), is(false));
}
private static ClusterState.Builder createClusterStateWithTemplate(String logstashTemplateString) throws IOException {
MetaData.Builder metaDataBuilder = new MetaData.Builder();
IndexTemplateMetaData.Builder logstashTemplateBuilder =
getIndexTemplateMetaData(LOGSTASH_TEMPLATE_NAME, logstashTemplateString);
metaDataBuilder.put(logstashTemplateBuilder);
// add the correct mapping regardless of which template is used
String logstashMappingString = "/" + LOGSTASH_TEMPLATE_NAME + ".json";
IndexMetaData.Builder logstashIndexMeta =
createIndexMetadata(LOGSTASH_INDEX_NAME, logstashMappingString);
metaDataBuilder.put(logstashIndexMeta);
return ClusterState.builder(state()).metaData(metaDataBuilder.build());
}
private static IndexTemplateMetaData.Builder getIndexTemplateMetaData(
String templateName, String templateString) throws IOException {
String template = TemplateUtils.loadTemplate(templateString, Version.CURRENT.toString(),
LogstashTemplateRegistry.TEMPLATE_VERSION_PATTERN);
PutIndexTemplateRequest request = new PutIndexTemplateRequest();
request.source(template, XContentType.JSON);
IndexTemplateMetaData.Builder templateBuilder =
IndexTemplateMetaData.builder(templateName);
for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
templateBuilder.putMapping(entry.getKey(), entry.getValue());
}
return templateBuilder;
}
private static IndexMetaData.Builder createIndexMetadata(
String indexName, String templateString) throws IOException {
String template = TemplateUtils.loadTemplate(templateString, Version.CURRENT.toString(),
LogstashTemplateRegistry.TEMPLATE_VERSION_PATTERN);
PutIndexTemplateRequest request = new PutIndexTemplateRequest();
request.source(template, XContentType.JSON);
IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName);
indexMetaData.settings(Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.build());
for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
indexMetaData.putMapping(entry.getKey(), entry.getValue());
}
return indexMetaData;
}
// cluster state where local node is master
private static ClusterState state() {
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
discoBuilder.masterNodeId("1");
discoBuilder.localNodeId("1");
ClusterState.Builder state = ClusterState.builder(new ClusterName("test-cluster"));
state.nodes(discoBuilder);
state.metaData(MetaData.builder().generateClusterUuidIfNeeded());
return state.build();
}
private static class TestPutIndexTemplateResponse extends PutIndexTemplateResponse {
TestPutIndexTemplateResponse(boolean acknowledged) {
super(acknowledged);
}
TestPutIndexTemplateResponse() {
super();
}
}
}
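The listener-capturing client above stands in for the request path these registries drive. For comparison, a minimal sketch of how a caller executes a request with an origin set, assuming the ClientHelper utilities introduced by this change (the origin constant is chosen only for illustration; the wiring in each component may differ):

// Sketch: stash the current context, tag the request with its origin, and make
// sure the listener is invoked with the caller's original context restored.
final ThreadContext threadContext = client.threadPool().getThreadContext();
final Supplier<ThreadContext.StoredContext> restorable = threadContext.newRestorableContext(false);
try (ThreadContext.StoredContext ignore = ClientHelper.stashWithOrigin(threadContext, ClientHelper.ML_ORIGIN)) {
    client.execute(action, request, new ContextPreservingActionListener<>(restorable, listener));
}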

View File

@ -1,307 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.job.persistence.AnomalyDetectorsIndex;
import org.elasticsearch.xpack.ml.job.persistence.ElasticsearchMappings;
import org.elasticsearch.xpack.ml.job.persistence.MockClientBuilder;
import org.elasticsearch.xpack.ml.notifications.AuditMessage;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.junit.Before;
import org.mockito.ArgumentCaptor;
import java.net.InetAddress;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import static org.elasticsearch.mock.orig.Mockito.doAnswer;
import static org.elasticsearch.mock.orig.Mockito.times;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
public class MachineLearningTemplateRegistryTests extends ESTestCase {
private static final String CLUSTER_NAME = "clusterMcClusterFace";
private ClusterService clusterService;
private ExecutorService executorService;
private Client client;
private ThreadPool threadPool;
@Before
public void setUpMocks() {
threadPool = mock(ThreadPool.class);
executorService = mock(ExecutorService.class);
clusterService = mock(ClusterService.class);
client = mock(Client.class);
doAnswer(invocation -> {
((Runnable) invocation.getArguments()[0]).run();
return null;
}).when(executorService).execute(any(Runnable.class));
when(threadPool.executor(ThreadPool.Names.GENERIC)).thenReturn(executorService);
}
public void testAddsListener() throws Exception {
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(Settings.EMPTY, clusterService, client, threadPool);
verify(clusterService, times(1)).addListener(templateRegistry);
}
public void testAddTemplatesIfMissing() throws Exception {
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME);
ArgumentCaptor<PutIndexTemplateRequest> captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class);
clientBuilder.putTemplate(captor);
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(Settings.EMPTY, clusterService, clientBuilder.build(), threadPool);
ClusterState cs = ClusterState.builder(new ClusterName("_name"))
.nodes(DiscoveryNodes.builder()
.add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))
.localNodeId("_node_id")
.masterNodeId("_node_id"))
.metaData(MetaData.builder())
.build();
templateRegistry.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
verify(threadPool, times(4)).executor(anyString());
assertFalse(templateRegistry.putMlNotificationsIndexTemplateCheck.get());
assertFalse(templateRegistry.putMlMetaIndexTemplateCheck.get());
assertFalse(templateRegistry.putStateIndexTemplateCheck.get());
assertFalse(templateRegistry.putResultsIndexTemplateCheck.get());
}
public void testAddTemplatesIfMissing_alreadyInitialized() throws Exception {
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME);
ArgumentCaptor<PutIndexTemplateRequest> captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class);
clientBuilder.putTemplate(captor);
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(Settings.EMPTY, clusterService, clientBuilder.build(), threadPool);
ClusterState cs = ClusterState.builder(new ClusterName("_name"))
.nodes(DiscoveryNodes.builder()
.add(new DiscoveryNode("_node_id", new TransportAddress(InetAddress.getLoopbackAddress(), 9200), Version.CURRENT))
.localNodeId("_node_id")
.masterNodeId("_node_id"))
.metaData(MetaData.builder()
.put(IndexMetaData.builder(Auditor.NOTIFICATIONS_INDEX).settings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
))
.put(IndexMetaData.builder(MlMetaIndex.INDEX_NAME).settings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
))
.put(IndexMetaData.builder(AnomalyDetectorsIndex.jobStateIndexName()).settings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
))
.put(IndexTemplateMetaData.builder(Auditor.NOTIFICATIONS_INDEX).version(Version.CURRENT.id).build())
.put(IndexTemplateMetaData.builder(MlMetaIndex.INDEX_NAME).version(Version.CURRENT.id).build())
.put(IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobStateIndexName()).version(Version.CURRENT.id).build())
.put(IndexTemplateMetaData.builder(
AnomalyDetectorsIndex.jobResultsIndexPrefix()).version(Version.CURRENT.id).build())
.putCustom(MlMetadata.TYPE, new MlMetadata.Builder().build()))
.build();
templateRegistry.clusterChanged(new ClusterChangedEvent("_source", cs, cs));
verify(threadPool, times(0)).executor(anyString());
assertFalse(templateRegistry.putMlNotificationsIndexTemplateCheck.get());
assertFalse(templateRegistry.putMlMetaIndexTemplateCheck.get());
assertFalse(templateRegistry.putStateIndexTemplateCheck.get());
assertFalse(templateRegistry.putResultsIndexTemplateCheck.get());
}
public void testMlResultsIndexSettings() {
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(createSettings(), clusterService, client, threadPool);
Settings settings = templateRegistry.mlResultsIndexSettings().build();
assertEquals(3, settings.size());
assertThat(settings.get("index.number_of_shards"), is(nullValue()));
assertEquals("async", settings.get("index.translog.durability"));
assertEquals("all_field_values", settings.get("index.query.default_field"));
assertEquals("2s", settings.get("index.unassigned.node_left.delayed_timeout"));
}
public void testMlAuditIndexSettings() {
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(createSettings(), clusterService, client, threadPool);
Settings settings = templateRegistry.mlNotificationIndexSettings().build();
assertEquals(2, settings.size());
assertEquals("1", settings.get("index.number_of_shards"));
assertEquals("2s", settings.get("index.unassigned.node_left.delayed_timeout"));
}
public void testMlStateIndexSettings() {
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(createSettings(), clusterService, client, threadPool);
Settings settings = templateRegistry.mlStateIndexSettings().build();
assertEquals(2, settings.size());
assertEquals("async", settings.get("index.translog.durability"));
assertEquals("2s", settings.get("index.unassigned.node_left.delayed_timeout"));
}
public void testPutNotificationIndexTemplate() {
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME);
ArgumentCaptor<PutIndexTemplateRequest> captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class);
clientBuilder.putTemplate(captor);
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(createSettings(), clusterService, clientBuilder.build(), threadPool);
templateRegistry.putNotificationMessageIndexTemplate((result, error) -> {
assertTrue(result);
PutIndexTemplateRequest request = captor.getValue();
assertNotNull(request);
assertEquals(templateRegistry.mlNotificationIndexSettings().build(), request.settings());
assertTrue(request.mappings().containsKey(AuditMessage.TYPE.getPreferredName()));
assertEquals(1, request.mappings().size());
assertEquals(Collections.singletonList(Auditor.NOTIFICATIONS_INDEX), request.patterns());
assertEquals(new Integer(Version.CURRENT.id), request.version());
});
}
public void testPutMetaIndexTemplate() {
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME);
ArgumentCaptor<PutIndexTemplateRequest> captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class);
clientBuilder.putTemplate(captor);
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(createSettings(), clusterService, clientBuilder.build(), threadPool);
templateRegistry.putMetaIndexTemplate((result, error) -> {
assertTrue(result);
PutIndexTemplateRequest request = captor.getValue();
assertNotNull(request);
assertEquals(templateRegistry.mlNotificationIndexSettings().build(), request.settings());
assertEquals(1, request.mappings().size());
assertThat(request.mappings().containsKey("doc"), is(true));
assertEquals(Collections.singletonList(MlMetaIndex.INDEX_NAME), request.patterns());
assertEquals(new Integer(Version.CURRENT.id), request.version());
});
}
public void testPutJobStateIndexTemplate() {
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME);
ArgumentCaptor<PutIndexTemplateRequest> captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class);
clientBuilder.putTemplate(captor);
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(createSettings(), clusterService, clientBuilder.build(), threadPool);
templateRegistry.putJobStateIndexTemplate((result, error) -> {
assertTrue(result);
PutIndexTemplateRequest request = captor.getValue();
assertNotNull(request);
assertEquals(templateRegistry.mlStateIndexSettings().build(), request.settings());
assertTrue(request.mappings().containsKey(ElasticsearchMappings.DOC_TYPE));
assertEquals(1, request.mappings().size());
assertEquals(Collections.singletonList(AnomalyDetectorsIndex.jobStateIndexName()), request.patterns());
assertEquals(new Integer(Version.CURRENT.id), request.version());
});
}
public void testPutJobResultsIndexTemplate() {
MockClientBuilder clientBuilder = new MockClientBuilder(CLUSTER_NAME);
ArgumentCaptor<PutIndexTemplateRequest> captor = ArgumentCaptor.forClass(PutIndexTemplateRequest.class);
clientBuilder.putTemplate(captor);
MachineLearningTemplateRegistry templateRegistry =
new MachineLearningTemplateRegistry(createSettings(), clusterService, clientBuilder.build(), threadPool);
templateRegistry.putJobResultsIndexTemplate((result, error) -> {
assertTrue(result);
PutIndexTemplateRequest request = captor.getValue();
assertNotNull(request);
assertEquals(templateRegistry.mlResultsIndexSettings().build(), request.settings());
assertTrue(request.mappings().containsKey("doc"));
assertEquals(1, request.mappings().size());
assertEquals(Collections.singletonList(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*"), request.patterns());
assertEquals(new Integer(Version.CURRENT.id), request.version());
});
}
public void testTemplateIsPresentAndUpToDate() {
// missing template
MetaData metaData = MetaData.builder().build();
assertFalse(MachineLearningTemplateRegistry.templateIsPresentAndUpToDate(Auditor.NOTIFICATIONS_INDEX, metaData));
// old version of template
IndexTemplateMetaData templateMetaData = IndexTemplateMetaData.builder(Auditor.NOTIFICATIONS_INDEX)
.version(Version.CURRENT.id - 1).build();
metaData = MetaData.builder().put(templateMetaData).build();
assertFalse(MachineLearningTemplateRegistry.templateIsPresentAndUpToDate(Auditor.NOTIFICATIONS_INDEX, metaData));
// latest template
templateMetaData = IndexTemplateMetaData.builder(Auditor.NOTIFICATIONS_INDEX)
.version(Version.CURRENT.id).build();
metaData = MetaData.builder().put(templateMetaData).build();
assertTrue(MachineLearningTemplateRegistry.templateIsPresentAndUpToDate(Auditor.NOTIFICATIONS_INDEX, metaData));
}
public void testAllTemplatesInstalled() {
MetaData metaData = MetaData.builder()
.put(IndexTemplateMetaData.builder(Auditor.NOTIFICATIONS_INDEX).version(Version.CURRENT.id).build())
.put(IndexTemplateMetaData.builder(MlMetaIndex.INDEX_NAME).version(Version.CURRENT.id).build())
.put(IndexTemplateMetaData.builder(AnomalyDetectorsIndex.jobStateIndexName()).version(Version.CURRENT.id).build())
.put(IndexTemplateMetaData.builder(
AnomalyDetectorsIndex.jobResultsIndexPrefix()).version(Version.CURRENT.id).build()).build();
assertTrue(MachineLearningTemplateRegistry.allTemplatesInstalled(metaData));
}
public void testAllTemplatesInstalled_OneMissing() {
MetaData.Builder metaDataBuilder = MetaData.builder();
String missing = randomFrom(MachineLearningTemplateRegistry.TEMPLATE_NAMES);
for (String templateName : MachineLearningTemplateRegistry.TEMPLATE_NAMES) {
if (templateName.equals(missing)) {
continue;
}
metaDataBuilder.put(IndexTemplateMetaData.builder(templateName).version(Version.CURRENT.id).build());
}
assertFalse(MachineLearningTemplateRegistry.allTemplatesInstalled(metaDataBuilder.build()));
}
private Settings createSettings() {
return Settings.builder()
.put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(2))
.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 1001L)
.build();
}
}

View File

@ -22,6 +22,7 @@ import static org.elasticsearch.mock.orig.Mockito.verify;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.same;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class MlDailyManagementServiceTests extends ESTestCase {
@ -32,6 +33,7 @@ public class MlDailyManagementServiceTests extends ESTestCase {
public void setUpTests() {
threadPool = new TestThreadPool("MlDailyManagementServiceTests");
client = mock(Client.class);
when(client.threadPool()).thenReturn(threadPool);
}
@After

View File

@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -31,7 +32,6 @@ import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.Assignment;
import org.elasticsearch.xpack.persistent.PersistentTasksService;
import org.elasticsearch.xpack.security.InternalClient;
import java.util.ArrayList;
import java.util.Arrays;
@ -292,7 +292,7 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCase
CloseJobAction.TransportAction transportAction = new CloseJobAction.TransportAction(Settings.EMPTY,
mock(TransportService.class), mock(ThreadPool.class), mock(ActionFilters.class), mock(IndexNameExpressionResolver.class),
clusterService, mock(InternalClient.class), mock(Auditor.class), mock(PersistentTasksService.class));
clusterService, mock(Client.class), mock(Auditor.class), mock(PersistentTasksService.class));
AtomicBoolean gotResponse = new AtomicBoolean(false);
CloseJobAction.Request request = new Request("foo");

View File

@ -8,8 +8,11 @@ package org.elasticsearch.xpack.ml.datafeed;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.mock.orig.Mockito;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.action.util.QueryPage;
import org.elasticsearch.xpack.ml.job.config.DataDescription;
import org.elasticsearch.xpack.ml.job.config.Job;
@ -45,6 +48,10 @@ public class DatafeedJobBuilderTests extends ESTestCase {
@Before
public void init() {
client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
when(client.settings()).thenReturn(Settings.EMPTY);
auditor = mock(Auditor.class);
jobProvider = mock(JobProvider.class);
taskHandler = mock(Consumer.class);
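The same stub recurs throughout the ML test changes that follow: because the code under test now obtains the ThreadContext from the client's thread pool before executing requests, mocked clients must hand back a real ThreadContext instead of null. The consolidated pattern, as a sketch:

Client client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
// stub both hops: client.threadPool() and threadPool.getThreadContext();
// Settings.EMPTY is sufficient for a test-only ThreadContext
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));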

View File

@ -8,9 +8,12 @@ package org.elasticsearch.xpack.ml.datafeed;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.mock.orig.Mockito;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.action.FlushJobAction;
import org.elasticsearch.xpack.ml.action.PostDataAction;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
@ -66,6 +69,9 @@ public class DatafeedJobTests extends ESTestCase {
dataExtractor = mock(DataExtractor.class);
when(dataExtractorFactory.newExtractor(anyLong(), anyLong())).thenReturn(dataExtractor);
client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
dataDescription = new DataDescription.Builder();
dataDescription.setFormat(DataDescription.DataFormat.XCONTENT);
postDataFuture = mock(ActionFuture.class);
@ -225,6 +231,9 @@ public class DatafeedJobTests extends ESTestCase {
public void testPostAnalysisProblem() throws Exception {
client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
when(client.execute(same(FlushJobAction.INSTANCE), any())).thenReturn(flushJobFuture);
when(client.execute(same(PostDataAction.INSTANCE), any())).thenThrow(new RuntimeException());
@ -248,6 +257,9 @@ public class DatafeedJobTests extends ESTestCase {
public void testPostAnalysisProblemIsConflict() throws Exception {
client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
when(client.execute(same(FlushJobAction.INSTANCE), any())).thenReturn(flushJobFuture);
when(client.execute(same(PostDataAction.INSTANCE), any())).thenThrow(ExceptionsHelper.conflictStatusException("conflict"));

View File

@ -10,10 +10,13 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.datafeed.ChunkingConfig;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.ml.datafeed.DatafeedManagerTests;
@ -44,6 +47,9 @@ public class DataExtractorFactoryTests extends ESTestCase {
@Before
public void setUpTests() {
client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
fieldsCapabilities = mock(FieldCapabilitiesResponse.class);
givenAggregatableField("time", "date");
givenAggregatableField("field", "keyword");

View File

@ -5,21 +5,18 @@
*/
package org.elasticsearch.xpack.ml.integration;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.XPackSingleNodeTestCase;
import org.elasticsearch.xpack.ml.MachineLearningTemplateRegistry;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.action.DeleteJobAction;
import org.elasticsearch.xpack.ml.action.PutJobAction;
import org.elasticsearch.xpack.ml.action.util.QueryPage;
@ -64,11 +61,8 @@ import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicReference;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ -256,22 +250,11 @@ public class AutodetectResultProcessorIT extends XPackSingleNodeTestCase {
}
private void putIndexTemplates() throws Exception {
ThreadPool threadPool = mock(ThreadPool.class);
ExecutorService executorService = mock(ExecutorService.class);
doAnswer(invocation -> {
((Runnable) invocation.getArguments()[0]).run();
return null;
}).when(executorService).execute(any(Runnable.class));
when(threadPool.executor(ThreadPool.Names.GENERIC)).thenReturn(executorService);
new MachineLearningTemplateRegistry(Settings.EMPTY, mock(ClusterService.class), client(), threadPool)
.addTemplatesIfMissing(client().admin().cluster().state(new ClusterStateRequest().all()).actionGet().getState());
// block until the templates are installed
assertBusy(() -> {
MetaData metaData = client().admin().cluster().prepareState().get().getState().getMetaData();
ClusterState state = client().admin().cluster().prepareState().get().getState();
assertTrue("Timed out waiting for the ML templates to be installed",
MachineLearningTemplateRegistry.allTemplatesInstalled(metaData));
MachineLearning.allTemplatesInstalled(state));
});
}

View File

@ -7,7 +7,6 @@ package org.elasticsearch.xpack.ml.job.persistence;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
@ -30,6 +29,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.QueryBuilder;
@ -37,6 +37,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.MlMetadata;
import org.elasticsearch.xpack.ml.action.util.QueryPage;
import org.elasticsearch.xpack.ml.job.config.Job;
@ -870,6 +871,9 @@ public class JobProviderTests extends ESTestCase {
private Client getMockedClient(Consumer<QueryBuilder> queryBuilderConsumer, SearchResponse response) {
Client client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
doAnswer(invocationOnMock -> {
MultiSearchRequest multiSearchRequest = (MultiSearchRequest) invocationOnMock.getArguments()[0];
queryBuilderConsumer.accept(multiSearchRequest.requests().get(0).source().query());
@ -891,20 +895,4 @@ public class JobProviderTests extends ESTestCase {
}).when(client).search(any(), any());
return client;
}
private Client getMockedClient(GetResponse response) {
Client client = mock(Client.class);
@SuppressWarnings("unchecked")
ActionFuture<GetResponse> actionFuture = mock(ActionFuture.class);
when(client.get(any())).thenReturn(actionFuture);
when(actionFuture.actionGet()).thenReturn(response);
doAnswer(invocationOnMock -> {
@SuppressWarnings("unchecked")
ActionListener<GetResponse> actionListener = (ActionListener<GetResponse>) invocationOnMock.getArguments()[1];
actionListener.onResponse(response);
return null;
}).when(client).get(any(), any());
return client;
}
}

View File

@ -53,6 +53,7 @@ public class JobRenormalizedResultsPersisterTests extends ESTestCase {
}
verify(client, times(1)).bulk(any());
verify(client, times(1)).threadPool();
verifyNoMoreInteractions(client);
}

View File

@ -12,7 +12,9 @@ import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.job.results.AnomalyRecord;
import org.elasticsearch.xpack.ml.job.results.Bucket;
import org.elasticsearch.xpack.ml.job.results.BucketInfluencer;
@ -189,12 +191,16 @@ public class JobResultsPersisterTests extends ESTestCase {
}
verify(client, times(1)).bulk(any());
verify(client, times(1)).threadPool();
verifyNoMoreInteractions(client);
}
@SuppressWarnings({"unchecked", "rawtypes"})
private Client mockClient(ArgumentCaptor<BulkRequest> captor) {
Client client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
ActionFuture<BulkResponse> future = mock(ActionFuture.class);
when(future.actionGet()).thenReturn(new BulkResponse(new BulkItemResponse[0], 0L));
when(client.bulk(captor.capture())).thenReturn(future);

View File

@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
@ -43,10 +44,12 @@ import org.elasticsearch.client.ClusterAdminClient;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.action.DeleteJobAction;
import org.mockito.ArgumentCaptor;
import org.mockito.invocation.InvocationOnMock;
@ -89,6 +92,9 @@ public class MockClientBuilder {
when(adminClient.indices()).thenReturn(indicesAdminClient);
Settings settings = Settings.builder().put("cluster.name", clusterName).build();
when(client.settings()).thenReturn(settings);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
}
@SuppressWarnings({ "unchecked" })
@ -302,11 +308,11 @@ public class MockClientBuilder {
@Override
public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
ActionListener<IndicesAliasesResponse> listener =
(ActionListener<IndicesAliasesResponse>) invocationOnMock.getArguments()[0];
(ActionListener<IndicesAliasesResponse>) invocationOnMock.getArguments()[1];
listener.onResponse(mock(IndicesAliasesResponse.class));
return null;
}
}).when(aliasesRequestBuilder).execute(any());
}).when(indicesAdminClient).aliases(any(IndicesAliasesRequest.class), any(ActionListener.class));
return this;
}
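Note the shifted argument index in the stub above: aliases(request, listener) receives the listener as its second argument, so the answer now reads getArguments()[1] where the old builder-based execute(listener) stub read getArguments()[0].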

View File

@ -9,7 +9,10 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.action.UpdateJobAction;
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;
@ -66,6 +69,9 @@ public class AutoDetectResultProcessorTests extends ESTestCase {
@Before
public void setUpMocks() {
client = mock(Client.class);
ThreadPool threadPool = mock(ThreadPool.class);
when(client.threadPool()).thenReturn(threadPool);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
renormalizer = mock(Renormalizer.class);
persister = mock(JobResultsPersister.class);
jobProvider = mock(JobProvider.class);

Some files were not shown because too many files have changed in this diff.