Merge branch 'master' into feature/sql
Original commit: elastic/x-pack-elasticsearch@b5fc2b484d
Commit: eaa75cfdbb
@@ -178,7 +178,7 @@ endif::[]
ifeval::["{release-state}"!="unreleased"]
["source","yaml",subs="attributes"]
--------------------------------------------
-version: 2.2
+version: '2.2'
services:
  elasticsearch:
    image: {docker-image}
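Editor's note: the only change in this docs hunk is quoting the Compose file version. An unquoted `2.2` is parsed by YAML as a floating-point number, while Docker Compose expects the `version` key to be a string, so `'2.2'` keeps the rendered example valid; this rationale is inferred rather than stated in the commit.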
@@ -52,6 +52,7 @@ import org.elasticsearch.xpack.XPackFeatureSet;
import org.elasticsearch.xpack.XPackPlugin;
import org.elasticsearch.xpack.XPackSettings;
import org.elasticsearch.xpack.ml.action.CloseJobAction;
+import org.elasticsearch.xpack.ml.action.DeleteCalendarAction;
import org.elasticsearch.xpack.ml.action.DeleteDatafeedAction;
import org.elasticsearch.xpack.ml.action.DeleteExpiredDataAction;
import org.elasticsearch.xpack.ml.action.DeleteFilterAction;

@@ -61,6 +62,7 @@ import org.elasticsearch.xpack.ml.action.FinalizeJobExecutionAction;
import org.elasticsearch.xpack.ml.action.FlushJobAction;
import org.elasticsearch.xpack.ml.action.ForecastJobAction;
import org.elasticsearch.xpack.ml.action.GetBucketsAction;
+import org.elasticsearch.xpack.ml.action.GetCalendarsAction;
import org.elasticsearch.xpack.ml.action.GetCategoriesAction;
import org.elasticsearch.xpack.ml.action.GetDatafeedsAction;
import org.elasticsearch.xpack.ml.action.GetDatafeedsStatsAction;

@@ -76,6 +78,7 @@ import org.elasticsearch.xpack.ml.action.KillProcessAction;
import org.elasticsearch.xpack.ml.action.OpenJobAction;
import org.elasticsearch.xpack.ml.action.PostDataAction;
import org.elasticsearch.xpack.ml.action.PreviewDatafeedAction;
+import org.elasticsearch.xpack.ml.action.PutCalendarAction;
import org.elasticsearch.xpack.ml.action.PutDatafeedAction;
import org.elasticsearch.xpack.ml.action.PutFilterAction;
import org.elasticsearch.xpack.ml.action.PutJobAction;

@@ -114,6 +117,9 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.NormalizerProcessFactor
import org.elasticsearch.xpack.ml.notifications.AuditMessage;
import org.elasticsearch.xpack.ml.notifications.Auditor;
import org.elasticsearch.xpack.ml.rest.RestDeleteExpiredDataAction;
+import org.elasticsearch.xpack.ml.rest.calendar.RestDeleteCalendarAction;
+import org.elasticsearch.xpack.ml.rest.calendar.RestGetCalendarsAction;
+import org.elasticsearch.xpack.ml.rest.calendar.RestPutCalendarAction;
import org.elasticsearch.xpack.ml.rest.datafeeds.RestDeleteDatafeedAction;
import org.elasticsearch.xpack.ml.rest.datafeeds.RestGetDatafeedStatsAction;
import org.elasticsearch.xpack.ml.rest.datafeeds.RestGetDatafeedsAction;
@@ -458,7 +464,10 @@ public class MachineLearning implements ActionPlugin {
                new RestStopDatafeedAction(settings, restController),
                new RestDeleteModelSnapshotAction(settings, restController),
                new RestDeleteExpiredDataAction(settings, restController),
-               new RestForecastJobAction(settings, restController)
+               new RestForecastJobAction(settings, restController),
+               new RestGetCalendarsAction(settings, restController),
+               new RestPutCalendarAction(settings, restController),
+               new RestDeleteCalendarAction(settings, restController)
        );
    }

@@ -504,7 +513,10 @@ public class MachineLearning implements ActionPlugin {
                new ActionHandler<>(DeleteModelSnapshotAction.INSTANCE, DeleteModelSnapshotAction.TransportAction.class),
                new ActionHandler<>(UpdateProcessAction.INSTANCE, UpdateProcessAction.TransportAction.class),
                new ActionHandler<>(DeleteExpiredDataAction.INSTANCE, DeleteExpiredDataAction.TransportAction.class),
-               new ActionHandler<>(ForecastJobAction.INSTANCE, ForecastJobAction.TransportAction.class)
+               new ActionHandler<>(ForecastJobAction.INSTANCE, ForecastJobAction.TransportAction.class),
+               new ActionHandler<>(GetCalendarsAction.INSTANCE, GetCalendarsAction.TransportAction.class),
+               new ActionHandler<>(PutCalendarAction.INSTANCE, PutCalendarAction.TransportAction.class),
+               new ActionHandler<>(DeleteCalendarAction.INSTANCE, DeleteCalendarAction.TransportAction.class)
        );
    }
@@ -0,0 +1,72 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.security.authc.Authentication;
import org.elasticsearch.xpack.security.authc.AuthenticationService;

import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;

import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.stashWithOrigin;

/**
 * A helper class for actions which decides if we should run via the _xpack user and set ML as origin
 * or if we should use the run_as functionality by setting the correct headers
 */
public class MlClientHelper {

    /**
     * List of headers that are related to security
     */
    public static final Set<String> SECURITY_HEADER_FILTERS = Sets.newHashSet(AuthenticationService.RUN_AS_USER_HEADER,
            Authentication.AUTHENTICATION_KEY);

    /**
     * Execute a client operation and return the response, try to run a datafeed search with least privileges, when headers exist
     *
     * @param datafeedConfig The config for a datafeed
     * @param client         The client used to query
     * @param supplier       The action to run
     * @return An instance of the response class
     */
    public static <T extends ActionResponse> T execute(DatafeedConfig datafeedConfig, Client client, Supplier<T> supplier) {
        return execute(datafeedConfig.getHeaders(), client, supplier);
    }

    /**
     * Execute a client operation and return the response, try to run an action with least privileges, when headers exist
     *
     * @param headers  Request headers, ideally including security headers
     * @param client   The client used to query
     * @param supplier The action to run
     * @return An instance of the response class
     */
    public static <T extends ActionResponse> T execute(Map<String, String> headers, Client client, Supplier<T> supplier) {
        // no headers, we will have to use the xpack internal user for our execution by specifying the ml origin
        if (headers == null || headers.isEmpty()) {
            try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
                return supplier.get();
            }
        } else {
            try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashContext()) {
                Map<String, String> filteredHeaders = headers.entrySet().stream()
                        .filter(e -> SECURITY_HEADER_FILTERS.contains(e.getKey()))
                        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
                client.threadPool().getThreadContext().copyHeaders(filteredHeaders.entrySet());
                return supplier.get();
            }
        }
    }
}
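Editor's note — an illustrative usage sketch, not part of this commit. It assumes `DatafeedConfig` exposes `getIndices()` and that a node `Client` is in scope; it shows the intent of `MlClientHelper.execute`: run the supplied operation with the datafeed's stored security headers when they exist, otherwise under the internal ML origin.

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.ml.MlClientHelper;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;

class MlClientHelperUsageSketch {
    // Runs a one-hit search against the datafeed's indices. If the datafeed was
    // created with security headers attached, the search runs "as" that user;
    // otherwise the context is stashed with the ML origin (internal x-pack user).
    static SearchResponse firstHit(Client client, DatafeedConfig datafeedConfig) {
        return MlClientHelper.execute(datafeedConfig, client,
                () -> client.prepareSearch(datafeedConfig.getIndices().toArray(new String[0]))
                        .setSize(1)
                        .get());
    }
}
```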
@@ -20,6 +20,8 @@ public final class MlMetaIndex {
     */
    public static final String INDEX_NAME = ".ml-meta";

+   public static final String INCLUDE_TYPE_KEY = "include_type";
+
    public static final String TYPE = "doc";

    private MlMetaIndex() {}
@@ -19,6 +19,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -48,6 +49,7 @@ import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Supplier;
+import java.util.stream.Collectors;

public class MlMetadata implements MetaData.Custom {

@@ -101,7 +103,7 @@ public class MlMetadata implements MetaData.Custom {
    }

    public Set<String> expandDatafeedIds(String expression, boolean allowNoDatafeeds) {
-       return NameResolver.newUnaliased(datafeeds.keySet(), datafeedId -> ExceptionsHelper.missingDatafeedException(datafeedId))
+       return NameResolver.newUnaliased(datafeeds.keySet(), ExceptionsHelper::missingDatafeedException)
                .expand(expression, allowNoDatafeeds);
    }

@@ -285,7 +287,7 @@ public class MlMetadata implements MetaData.Custom {
            return this;
        }

-       public Builder putDatafeed(DatafeedConfig datafeedConfig) {
+       public Builder putDatafeed(DatafeedConfig datafeedConfig, ThreadContext threadContext) {
            if (datafeeds.containsKey(datafeedConfig.getId())) {
                throw new ResourceAlreadyExistsException("A datafeed with id [" + datafeedConfig.getId() + "] already exists");
            }

@@ -293,6 +295,17 @@ public class MlMetadata implements MetaData.Custom {
            checkJobIsAvailableForDatafeed(jobId);
            Job job = jobs.get(jobId);
            DatafeedJobValidator.validate(datafeedConfig, job);

+           if (threadContext != null) {
+               // Adjust the request, adding security headers from the current thread context
+               DatafeedConfig.Builder builder = new DatafeedConfig.Builder(datafeedConfig);
+               Map<String, String> headers = threadContext.getHeaders().entrySet().stream()
+                       .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey()))
+                       .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+               builder.setHeaders(headers);
+               datafeedConfig = builder.build();
+           }

            datafeeds.put(datafeedConfig.getId(), datafeedConfig);
            return this;
        }

@@ -309,7 +322,7 @@ public class MlMetadata implements MetaData.Custom {
            }
        }

-       public Builder updateDatafeed(DatafeedUpdate update, PersistentTasksCustomMetaData persistentTasks) {
+       public Builder updateDatafeed(DatafeedUpdate update, PersistentTasksCustomMetaData persistentTasks, ThreadContext threadContext) {
            String datafeedId = update.getId();
            DatafeedConfig oldDatafeedConfig = datafeeds.get(datafeedId);
            if (oldDatafeedConfig == null) {

@@ -317,7 +330,7 @@ public class MlMetadata implements MetaData.Custom {
            }
            checkDatafeedIsStopped(() -> Messages.getMessage(Messages.DATAFEED_CANNOT_UPDATE_IN_CURRENT_STATE, datafeedId,
                    DatafeedState.STARTED), datafeedId, persistentTasks);
-           DatafeedConfig newDatafeedConfig = update.apply(oldDatafeedConfig);
+           DatafeedConfig newDatafeedConfig = update.apply(oldDatafeedConfig, threadContext);
            if (newDatafeedConfig.getJobId().equals(oldDatafeedConfig.getJobId()) == false) {
                checkJobIsAvailableForDatafeed(newDatafeedConfig.getJobId());
            }

@@ -393,14 +406,13 @@ public class MlMetadata implements MetaData.Custom {
            putJob(jobBuilder.build(), true);
        }

-       public void checkJobHasNoDatafeed(String jobId) {
+       void checkJobHasNoDatafeed(String jobId) {
            Optional<DatafeedConfig> datafeed = getDatafeedByJobId(jobId);
            if (datafeed.isPresent()) {
                throw ExceptionsHelper.conflictStatusException("Cannot delete job [" + jobId + "] because datafeed ["
                        + datafeed.get().getId() + "] refers to it");
            }
        }

    }

    /**
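Editor's note — a hedged sketch (not part of the commit) of how a caller now uses the extended builder API: the `ThreadContext` is passed through so that `putDatafeed` can copy the `run_as`/authentication headers of the current request onto the stored `DatafeedConfig`. This mirrors the pattern `PutDatafeedAction` uses later in this diff.

```java
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.xpack.ml.MlMetadata;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;

class PutDatafeedMetadataSketch {
    // Builds a new MlMetadata containing the datafeed; security headers from the
    // calling thread context are attached to the stored config by putDatafeed.
    static MlMetadata withDatafeed(ClusterState state, DatafeedConfig datafeed, ThreadContext threadContext) {
        MlMetadata current = state.getMetaData().custom(MlMetadata.TYPE);
        return new MlMetadata.Builder(current)
                .putDatafeed(datafeed, threadContext)
                .build();
    }
}
```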
@@ -0,0 +1,184 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

import java.io.IOException;
import java.util.Objects;

import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;

public class DeleteCalendarAction extends Action<DeleteCalendarAction.Request, DeleteCalendarAction.Response,
        DeleteCalendarAction.RequestBuilder> {

    public static final DeleteCalendarAction INSTANCE = new DeleteCalendarAction();
    public static final String NAME = "cluster:admin/xpack/ml/calendars/delete";

    private DeleteCalendarAction() {
        super(NAME);
    }

    @Override
    public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new RequestBuilder(client, this);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    public static class Request extends AcknowledgedRequest<Request> {

        private String calendarId;

        Request() {
        }

        public Request(String calendarId) {
            this.calendarId = ExceptionsHelper.requireNonNull(calendarId, Calendar.ID.getPreferredName());
        }

        public String getCalendarId() {
            return calendarId;
        }

        @Override
        public ActionRequestValidationException validate() {
            return null;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            calendarId = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(calendarId);
        }

        @Override
        public int hashCode() {
            return Objects.hash(calendarId);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || getClass() != obj.getClass()) {
                return false;
            }

            Request other = (Request) obj;
            return Objects.equals(calendarId, other.calendarId);
        }
    }

    public static class RequestBuilder extends ActionRequestBuilder<Request, Response,
            RequestBuilder> {

        public RequestBuilder(ElasticsearchClient client, DeleteCalendarAction action) {
            super(client, action, new Request());
        }
    }

    public static class Response extends AcknowledgedResponse {

        public Response(boolean acknowledged) {
            super(acknowledged);
        }

        private Response() {}

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            readAcknowledged(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            writeAcknowledged(out);
        }
    }

    public static class TransportAction extends HandledTransportAction<DeleteCalendarAction.Request, DeleteCalendarAction.Response> {

        private final Client client;

        @Inject
        public TransportAction(Settings settings, ThreadPool threadPool,
                               TransportService transportService, ActionFilters actionFilters,
                               IndexNameExpressionResolver indexNameExpressionResolver,
                               Client client) {
            super(settings, NAME, threadPool, transportService, actionFilters,
                    indexNameExpressionResolver, Request::new);
            this.client = client;
        }

        @Override
        protected void doExecute(DeleteCalendarAction.Request request, ActionListener<DeleteCalendarAction.Response> listener) {

            final String calendarId = request.getCalendarId();

            DeleteRequest deleteRequest = new DeleteRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, Calendar.documentId(calendarId));

            BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
            bulkRequestBuilder.add(deleteRequest);
            bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
            executeAsyncWithOrigin(client, ML_ORIGIN, BulkAction.INSTANCE, bulkRequestBuilder.request(),
                    new ActionListener<BulkResponse>() {
                        @Override
                        public void onResponse(BulkResponse bulkResponse) {
                            if (bulkResponse.getItems()[0].status() == RestStatus.NOT_FOUND) {
                                listener.onFailure(new ResourceNotFoundException("Could not delete calendar with ID [" + calendarId
                                        + "] because it does not exist"));
                            } else {
                                listener.onResponse(new Response(true));
                            }
                        }

                        @Override
                        public void onFailure(Exception e) {
                            listener.onFailure(ExceptionsHelper.serverError("Could not delete calendar with ID [" + calendarId + "]", e));
                        }
                    });
        }
    }
}
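Editor's note — an illustrative caller for the new delete action, not part of the commit; it assumes a node/transport `Client` is available.

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.ml.action.DeleteCalendarAction;

class DeleteCalendarSketch {
    // Deletes a calendar document from .ml-meta and reports whether the deletion
    // was acknowledged; a missing calendar surfaces as ResourceNotFoundException
    // through the failure handler.
    static void deleteCalendar(Client client, String calendarId) {
        DeleteCalendarAction.Request request = new DeleteCalendarAction.Request(calendarId);
        client.execute(DeleteCalendarAction.INSTANCE, request, ActionListener.wrap(
                response -> System.out.println("acknowledged: " + response.isAcknowledged()),
                e -> System.err.println("delete failed: " + e.getMessage())));
    }
}
```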
@@ -202,8 +202,7 @@ public class DeleteFilterAction extends Action<DeleteFilterAction.Request, Delet

                        @Override
                        public void onFailure(Exception e) {
-                           logger.error("Could not delete filter with ID [" + filterId + "]", e);
-                           listener.onFailure(new IllegalStateException("Could not delete filter with ID [" + filterId + "]", e));
+                           listener.onFailure(ExceptionsHelper.serverError("Could not delete filter with ID [" + filterId + "]", e));
                        }
                    });
        }
@@ -0,0 +1,314 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.action.util.PageParams;
import org.elasticsearch.xpack.ml.action.util.QueryPage;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.job.persistence.JobProvider;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;

public class GetCalendarsAction extends Action<GetCalendarsAction.Request, GetCalendarsAction.Response, GetCalendarsAction.RequestBuilder> {

    public static final GetCalendarsAction INSTANCE = new GetCalendarsAction();
    public static final String NAME = "cluster:monitor/xpack/ml/calendars/get";

    private GetCalendarsAction() {
        super(NAME);
    }

    @Override
    public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new RequestBuilder(client);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    public static class Request extends ActionRequest {

        private String calendarId;
        private PageParams pageParams;

        public Request() {
        }

        public void setCalendarId(String calendarId) {
            this.calendarId = calendarId;
        }

        public String getCalendarId() {
            return calendarId;
        }

        public PageParams getPageParams() {
            return pageParams;
        }

        public void setPageParams(PageParams pageParams) {
            this.pageParams = pageParams;
        }

        @Override
        public ActionRequestValidationException validate() {
            ActionRequestValidationException validationException = null;

            if (calendarId != null && pageParams != null) {
                validationException = addValidationError("Params [" + PageParams.FROM.getPreferredName()
                        + ", " + PageParams.SIZE.getPreferredName() + "] are incompatible with ["
                        + Calendar.ID.getPreferredName() + "].",
                        validationException);
            }
            return validationException;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            calendarId = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(calendarId);
        }

        @Override
        public int hashCode() {
            return Objects.hash(calendarId);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null) {
                return false;
            }
            if (getClass() != obj.getClass()) {
                return false;
            }
            Request other = (Request) obj;
            return Objects.equals(calendarId, other.calendarId);
        }
    }

    public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {

        public RequestBuilder(ElasticsearchClient client) {
            super(client, INSTANCE, new Request());
        }
    }

    public static class Response extends ActionResponse implements StatusToXContentObject {

        private QueryPage<Calendar> calendars;

        public Response(QueryPage<Calendar> calendars) {
            this.calendars = calendars;
        }

        Response() {
        }

        public QueryPage<Calendar> getCalendars() {
            return calendars;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            calendars = new QueryPage<>(in, Calendar::new);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            calendars.writeTo(out);
        }

        @Override
        public RestStatus status() {
            return RestStatus.OK;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            calendars.doXContentBody(builder, params);
            builder.endObject();
            return builder;
        }

        @Override
        public int hashCode() {
            return Objects.hash(calendars);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null) {
                return false;
            }
            if (getClass() != obj.getClass()) {
                return false;
            }
            Response other = (Response) obj;
            return Objects.equals(calendars, other.calendars);
        }

        @Override
        public final String toString() {
            return Strings.toString(this);
        }
    }

    public static class TransportAction extends HandledTransportAction<Request, Response> {

        private final Client client;

        @Inject
        public TransportAction(Settings settings, ThreadPool threadPool,
                               TransportService transportService, ActionFilters actionFilters,
                               IndexNameExpressionResolver indexNameExpressionResolver,
                               Client client) {
            super(settings, NAME, threadPool, transportService, actionFilters,
                    indexNameExpressionResolver, Request::new);
            this.client = client;
        }

        @Override
        protected void doExecute(Request request, ActionListener<Response> listener) {
            final String calendarId = request.getCalendarId();
            if (request.getCalendarId() != null) {
                getCalendar(calendarId, listener);
            } else {
                PageParams pageParams = request.getPageParams();
                if (pageParams == null) {
                    pageParams = PageParams.defaultParams();
                }
                getCalendars(pageParams, listener);
            }
        }

        private void getCalendar(String calendarId, ActionListener<Response> listener) {
            GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, Calendar.documentId(calendarId));
            executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener<GetResponse>() {
                @Override
                public void onResponse(GetResponse getDocResponse) {

                    try {
                        QueryPage<Calendar> calendars;
                        if (getDocResponse.isExists()) {
                            BytesReference docSource = getDocResponse.getSourceAsBytesRef();

                            try (XContentParser parser =
                                         XContentFactory.xContent(docSource).createParser(NamedXContentRegistry.EMPTY, docSource)) {
                                Calendar calendar = Calendar.PARSER.apply(parser, null).build();
                                calendars = new QueryPage<>(Collections.singletonList(calendar), 1, Calendar.RESULTS_FIELD);

                                Response response = new Response(calendars);
                                listener.onResponse(response);
                            }
                        } else {
                            this.onFailure(QueryPage.emptyQueryPage(Calendar.RESULTS_FIELD));
                        }

                    } catch (Exception e) {
                        this.onFailure(e);
                    }
                }

                @Override
                public void onFailure(Exception e) {
                    listener.onFailure(e);
                }
            });
        }

        private void getCalendars(PageParams pageParams, ActionListener<Response> listener) {
            SearchSourceBuilder sourceBuilder = new SearchSourceBuilder()
                    .from(pageParams.getFrom())
                    .size(pageParams.getSize())
                    .sort(Calendar.ID.getPreferredName())
                    .query(QueryBuilders.termQuery(Calendar.TYPE.getPreferredName(), Calendar.CALENDAR_TYPE));

            SearchRequest searchRequest = new SearchRequest(MlMetaIndex.INDEX_NAME)
                    .indicesOptions(JobProvider.addIgnoreUnavailable(SearchRequest.DEFAULT_INDICES_OPTIONS))
                    .source(sourceBuilder);

            executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse response) {
                    List<Calendar> docs = new ArrayList<>();
                    for (SearchHit hit : response.getHits().getHits()) {
                        BytesReference docSource = hit.getSourceRef();
                        try (XContentParser parser = XContentFactory.xContent(docSource).createParser(
                                NamedXContentRegistry.EMPTY, docSource)) {
                            docs.add(Calendar.PARSER.apply(parser, null).build());
                        } catch (IOException e) {
                            this.onFailure(e);
                        }
                    }

                    Response getResponse = new Response(
                            new QueryPage<>(docs, docs.size(), Calendar.RESULTS_FIELD));
                    listener.onResponse(getResponse);
                }

                @Override
                public void onFailure(Exception e) {
                    listener.onFailure(e);
                }
            },
            client::search);
        }
    }
}
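Editor's note — a small request-building sketch (not part of the commit) showing the validation rule enforced above: a single `calendar_id` and `from`/`size` paging are mutually exclusive, and the transport action falls back to default paging when neither is set.

```java
import org.elasticsearch.xpack.ml.action.GetCalendarsAction;
import org.elasticsearch.xpack.ml.action.util.PageParams;

class GetCalendarsRequestSketch {
    // A paged "get all calendars" request; additionally setting a calendar id
    // on the same request would make validate() return a non-null error.
    static GetCalendarsAction.Request pageOfCalendars(int from, int size) {
        GetCalendarsAction.Request request = new GetCalendarsAction.Request();
        request.setPageParams(new PageParams(from, size));
        assert request.validate() == null;
        return request;
    }
}
```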
@@ -11,14 +11,14 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
-import org.elasticsearch.action.get.TransportGetAction;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Strings;

@@ -51,6 +51,8 @@ import java.util.List;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
+import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;


public class GetFiltersAction extends Action<GetFiltersAction.Request, GetFiltersAction.Response, GetFiltersAction.RequestBuilder> {

@@ -81,10 +83,6 @@ public class GetFiltersAction extends Action<GetFiltersAction.Request, GetFilter
        }

        public void setFilterId(String filterId) {
-           if (pageParams != null) {
-               throw new IllegalArgumentException("Param [" + MlFilter.ID.getPreferredName() + "] is incompatible with ["
-                       + PageParams.FROM.getPreferredName()+ ", " + PageParams.SIZE.getPreferredName() + "].");
-           }
            this.filterId = filterId;
        }

@@ -97,21 +95,16 @@ public class GetFiltersAction extends Action<GetFiltersAction.Request, GetFilter
        }

        public void setPageParams(PageParams pageParams) {
-           if (filterId != null) {
-               throw new IllegalArgumentException("Param [" + PageParams.FROM.getPreferredName()
-                       + ", " + PageParams.SIZE.getPreferredName() + "] is incompatible with ["
-                       + MlFilter.ID.getPreferredName() + "].");
-           }
            this.pageParams = pageParams;
        }

        @Override
        public ActionRequestValidationException validate() {
            ActionRequestValidationException validationException = null;
-           if (pageParams == null && filterId == null) {
-               validationException = addValidationError("Both [" + MlFilter.ID.getPreferredName() + "] and ["
-                       + PageParams.FROM.getPreferredName() + ", " + PageParams.SIZE.getPreferredName() + "] "
-                       + "cannot be null" , validationException);
+           if (pageParams != null && filterId != null) {
+               validationException = addValidationError("Params [" + PageParams.FROM.getPreferredName() +
+                       ", " + PageParams.SIZE.getPreferredName() + "] are incompatible with ["
+                       + MlFilter.ID.getPreferredName() + "]", validationException);
            }
            return validationException;
        }

@@ -218,18 +211,16 @@ public class GetFiltersAction extends Action<GetFiltersAction.Request, GetFilter

    public static class TransportAction extends HandledTransportAction<Request, Response> {

-       private final TransportGetAction transportGetAction;
-       private final TransportSearchAction transportSearchAction;
+       private final Client client;

        @Inject
        public TransportAction(Settings settings, ThreadPool threadPool,
                               TransportService transportService, ActionFilters actionFilters,
                               IndexNameExpressionResolver indexNameExpressionResolver,
-                              TransportGetAction transportGetAction, TransportSearchAction transportSearchAction) {
+                              Client client) {
            super(settings, NAME, threadPool, transportService, actionFilters,
                    indexNameExpressionResolver, Request::new);
-           this.transportGetAction = transportGetAction;
-           this.transportSearchAction = transportSearchAction;
+           this.client = client;
        }

        @Override

@@ -237,16 +228,18 @@ public class GetFiltersAction extends Action<GetFiltersAction.Request, GetFilter
            final String filterId = request.getFilterId();
            if (!Strings.isNullOrEmpty(filterId)) {
                getFilter(filterId, listener);
-           } else if (request.getPageParams() != null) {
-               getFilters(request.getPageParams(), listener);
-           } else {
-               throw new IllegalStateException("Both filterId and pageParams are null");
+           } else {
+               PageParams pageParams = request.getPageParams();
+               if (pageParams == null) {
+                   pageParams = PageParams.defaultParams();
+               }
+               getFilters(pageParams, listener);
            }
        }

        private void getFilter(String filterId, ActionListener<Response> listener) {
            GetRequest getRequest = new GetRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, MlFilter.documentId(filterId));
-           transportGetAction.execute(getRequest, new ActionListener<GetResponse>() {
+           executeAsyncWithOrigin(client, ML_ORIGIN, GetAction.INSTANCE, getRequest, new ActionListener<GetResponse>() {
                @Override
                public void onResponse(GetResponse getDocResponse) {

@@ -287,7 +280,7 @@ public class GetFiltersAction extends Action<GetFiltersAction.Request, GetFilter
                    .indicesOptions(JobProvider.addIgnoreUnavailable(SearchRequest.DEFAULT_INDICES_OPTIONS))
                    .source(sourceBuilder);

-           transportSearchAction.execute(searchRequest, new ActionListener<SearchResponse>() {
+           executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest, new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse response) {
                    List<MlFilter> docs = new ArrayList<>();

@@ -310,7 +303,8 @@ public class GetFiltersAction extends Action<GetFiltersAction.Request, GetFilter
                public void onFailure(Exception e) {
                    listener.onFailure(e);
                }
-           });
+           },
+           client::search);
        }
    }
}
@@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.ml.MlClientHelper;
import org.elasticsearch.xpack.ml.MlMetadata;
import org.elasticsearch.xpack.ml.datafeed.ChunkingConfig;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;

@@ -42,6 +43,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;

@@ -214,6 +216,10 @@ public class PreviewDatafeedAction extends Action<PreviewDatafeedAction.Request,
            }
            DatafeedConfig.Builder datafeedWithAutoChunking = new DatafeedConfig.Builder(datafeed);
            datafeedWithAutoChunking.setChunkingConfig(ChunkingConfig.newAuto());
+           Map<String, String> headers = threadPool.getThreadContext().getHeaders().entrySet().stream()
+                   .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey()))
+                   .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+           datafeedWithAutoChunking.setHeaders(headers);
            // NB: this is using the client from the transport layer, NOT the internal client.
            // This is important because it means the datafeed search will fail if the user
            // requesting the preview doesn't have permission to search the relevant indices.
@@ -0,0 +1,222 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.calendars.Calendar;
import org.elasticsearch.xpack.ml.job.messages.Messages;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

import java.io.IOException;
import java.util.Collections;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.executeAsyncWithOrigin;

public class PutCalendarAction extends Action<PutCalendarAction.Request, PutCalendarAction.Response, PutCalendarAction.RequestBuilder> {
    public static final PutCalendarAction INSTANCE = new PutCalendarAction();
    public static final String NAME = "cluster:admin/xpack/ml/calendars/put";

    private PutCalendarAction() {
        super(NAME);
    }

    @Override
    public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
        return new RequestBuilder(client);
    }

    @Override
    public Response newResponse() {
        return new Response();
    }

    public static class Request extends ActionRequest implements ToXContentObject {

        public static Request parseRequest(String calendarId, XContentParser parser) {
            Calendar.Builder builder = Calendar.PARSER.apply(parser, null);
            if (builder.getId() == null) {
                builder.setId(calendarId);
            } else if (!Strings.isNullOrEmpty(calendarId) && !calendarId.equals(builder.getId())) {
                // If we have both URI and body filter ID, they must be identical
                throw new IllegalArgumentException(Messages.getMessage(Messages.INCONSISTENT_ID, Calendar.ID.getPreferredName(),
                        builder.getId(), calendarId));
            }
            return new Request(builder.build());
        }

        private Calendar calendar;

        Request() {
        }

        public Request(Calendar calendar) {
            this.calendar = ExceptionsHelper.requireNonNull(calendar, "calendar");
        }

        public Calendar getCalendar() {
            return calendar;
        }

        @Override
        public ActionRequestValidationException validate() {
            ActionRequestValidationException validationException = null;
            if ("_all".equals(calendar.getId())) {
                validationException =
                        addValidationError("Cannot create a Calendar with the reserved name [_all]",
                                validationException);
            }
            return validationException;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            calendar = new Calendar(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            calendar.writeTo(out);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            calendar.toXContent(builder, params);
            return builder;
        }

        @Override
        public int hashCode() {
            return Objects.hash(calendar);
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null) {
                return false;
            }
            if (getClass() != obj.getClass()) {
                return false;
            }
            Request other = (Request) obj;
            return Objects.equals(calendar, other.calendar);
        }
    }

    public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {

        public RequestBuilder(ElasticsearchClient client) {
            super(client, INSTANCE, new Request());
        }
    }

    public static class Response extends AcknowledgedResponse implements ToXContentObject {

        private Calendar calendar;

        Response() {
        }

        public Response(Calendar calendar) {
            super(true);
            this.calendar = calendar;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            readAcknowledged(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            writeAcknowledged(out);
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return calendar.toXContent(builder, params);
        }
    }

    public static class TransportAction extends HandledTransportAction<Request, Response> {

        private final Client client;

        @Inject
        public TransportAction(Settings settings, ThreadPool threadPool,
                               TransportService transportService, ActionFilters actionFilters,
                               IndexNameExpressionResolver indexNameExpressionResolver, Client client) {
            super(settings, NAME, threadPool, transportService, actionFilters,
                    indexNameExpressionResolver, Request::new);
            this.client = client;
        }

        @Override
        protected void doExecute(Request request, ActionListener<Response> listener) {
            final Calendar calendar = request.getCalendar();
            IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, calendar.documentId());
            try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
                indexRequest.source(calendar.toXContent(builder,
                        new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true"))));
            } catch (IOException e) {
                throw new IllegalStateException("Failed to serialise calendar with id [" + calendar.getId() + "]", e);
            }

            // Make it an error to overwrite an existing calendar
            indexRequest.opType(DocWriteRequest.OpType.CREATE);
            indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);

            executeAsyncWithOrigin(client, ML_ORIGIN, IndexAction.INSTANCE, indexRequest,
                    new ActionListener<IndexResponse>() {
                        @Override
                        public void onResponse(IndexResponse indexResponse) {
                            listener.onResponse(new Response(calendar));
                        }

                        @Override
                        public void onFailure(Exception e) {
                            listener.onFailure(
                                    ExceptionsHelper.serverError("Error putting calendar with id [" + calendar.getId() + "]", e));
                        }
                    });
        }
    }
}
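Editor's note — an illustrative sketch (not part of the commit) of building a put-calendar request from a JSON body, presumably close to what the REST handler does; the `createParser(NamedXContentRegistry, String)` overload is assumed to exist in this codebase's Elasticsearch version.

```java
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.ml.action.PutCalendarAction;

import java.io.IOException;

class PutCalendarRequestSketch {
    // Parses a body such as {"job_ids": ["my-job"]}; the id from the URL is used
    // if the body omits one, and a mismatch between the two is rejected by
    // Request.parseRequest.
    static PutCalendarAction.Request fromJson(String calendarId, String json) throws IOException {
        try (XContentParser parser = XContentFactory.xContent(XContentType.JSON)
                .createParser(NamedXContentRegistry.EMPTY, json)) {
            return PutCalendarAction.Request.parseRequest(calendarId, parser);
        }
    }
}
```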
@@ -49,7 +49,9 @@ import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.support.Exceptions;

import java.io.IOException;
+import java.util.Map;
import java.util.Objects;
+import java.util.stream.Collectors;

public class PutDatafeedAction extends Action<PutDatafeedAction.Request, PutDatafeedAction.Response, PutDatafeedAction.RequestBuilder> {

@@ -218,8 +220,7 @@ public class PutDatafeedAction extends Action<PutDatafeedAction.Request, PutData
        }

        @Override
-       protected void masterOperation(Request request, ClusterState state,
-                                      ActionListener<Response> listener) throws Exception {
+       protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) {
            // If security is enabled only create the datafeed if the user requesting creation has
            // permission to read the indices the datafeed is going to read from
            if (securityEnabled) {

@@ -266,6 +267,7 @@ public class PutDatafeedAction extends Action<PutDatafeedAction.Request, PutData
        }

        private void putDatafeed(Request request, ActionListener<Response> listener) {

            clusterService.submitStateUpdateTask(
                    "put-datafeed-" + request.getDatafeed().getId(),
                    new AckedClusterStateUpdateTask<Response>(request, listener) {

@@ -275,13 +277,11 @@ public class PutDatafeedAction extends Action<PutDatafeedAction.Request, PutData
                        if (acknowledged) {
                            logger.info("Created datafeed [{}]", request.getDatafeed().getId());
                        }
-                       return new Response(acknowledged,
-                               request.getDatafeed());
+                       return new Response(acknowledged, request.getDatafeed());
                    }

                    @Override
-                   public ClusterState execute(ClusterState currentState)
-                           throws Exception {
+                   public ClusterState execute(ClusterState currentState) {
                        return putDatafeed(request, currentState);
                    }
                });

@@ -290,7 +290,7 @@ public class PutDatafeedAction extends Action<PutDatafeedAction.Request, PutData
        private ClusterState putDatafeed(Request request, ClusterState clusterState) {
            MlMetadata currentMetadata = clusterState.getMetaData().custom(MlMetadata.TYPE);
            MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata)
-                   .putDatafeed(request.getDatafeed()).build();
+                   .putDatafeed(request.getDatafeed(), threadPool.getThreadContext()).build();
            return ClusterState.builder(clusterState).metaData(
                    MetaData.builder(clusterState.getMetaData()).putCustom(MlMetadata.TYPE, newMetadata).build())
                    .build();
@@ -5,7 +5,6 @@
 */
package org.elasticsearch.xpack.ml.action;

-import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;

@@ -181,7 +180,7 @@ public class PutFilterAction extends Action<PutFilterAction.Request, PutFilterAc
            MlFilter filter = request.getFilter();
            IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, filter.documentId());
            try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
-               ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlFilter.INCLUDE_TYPE_KEY, "true"));
+               ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true"));
                indexRequest.source(filter.toXContent(builder, params));
            } catch (IOException e) {
                throw new IllegalStateException("Failed to serialise filter with id [" + filter.getId() + "]", e);

@@ -199,8 +198,7 @@ public class PutFilterAction extends Action<PutFilterAction.Request, PutFilterAc

                        @Override
                        public void onFailure(Exception e) {
-                           listener.onFailure(
-                                   new ResourceNotFoundException("Could not create filter with ID [" + filter.getId() + "]", e));
+                           listener.onFailure(ExceptionsHelper.serverError("Error putting filter with id [" + filter.getId() + "]", e));
                        }
                    });
        }
@@ -72,9 +72,6 @@ import java.util.Objects;
import java.util.function.LongSupplier;
import java.util.function.Predicate;

-import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
-import static org.elasticsearch.xpack.ClientHelper.clientWithOrigin;

public class StartDatafeedAction
        extends Action<StartDatafeedAction.Request, StartDatafeedAction.Response, StartDatafeedAction.RequestBuilder> {

@@ -437,7 +434,7 @@ public class StartDatafeedAction
            super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new);
            this.licenseState = licenseState;
            this.persistentTasksService = persistentTasksService;
-           this.client = clientWithOrigin(client, ML_ORIGIN);
+           this.client = client;
        }

        @Override

@@ -453,7 +450,7 @@ public class StartDatafeedAction
        }

        @Override
-       protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
+       protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) {
            DatafeedParams params = request.params;
            if (licenseState.isMachineLearningAllowed()) {
                ActionListener<PersistentTask<DatafeedParams>> finalListener = new ActionListener<PersistentTask<DatafeedParams>>() {
@@ -143,8 +143,7 @@ public class UpdateDatafeedAction extends Action<UpdateDatafeedAction.Request, P
        }

        @Override
-       protected void masterOperation(Request request, ClusterState state, ActionListener<PutDatafeedAction.Response> listener)
-               throws Exception {
+       protected void masterOperation(Request request, ClusterState state, ActionListener<PutDatafeedAction.Response> listener) {
            clusterService.submitStateUpdateTask("update-datafeed-" + request.getUpdate().getId(),
                    new AckedClusterStateUpdateTask<PutDatafeedAction.Response>(request, listener) {
                        private volatile DatafeedConfig updatedDatafeed;

@@ -164,7 +163,7 @@ public class UpdateDatafeedAction extends Action<UpdateDatafeedAction.Request, P
                            PersistentTasksCustomMetaData persistentTasks =
                                    currentState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE);
                            MlMetadata newMetadata = new MlMetadata.Builder(currentMetadata)
-                                   .updateDatafeed(update, persistentTasks).build();
+                                   .updateDatafeed(update, persistentTasks, threadPool.getThreadContext()).build();
                            updatedDatafeed = newMetadata.getDatafeed(update.getId());
                            return ClusterState.builder(currentState).metaData(
                                    MetaData.builder(currentState.getMetaData()).putCustom(MlMetadata.TYPE, newMetadata).build()).build();
@@ -25,7 +25,6 @@ public class PageParams implements ToXContentObject, Writeable {
    public static final int DEFAULT_FROM = 0;
    public static final int DEFAULT_SIZE = 100;

    public static final ConstructingObjectParser<PageParams, Void> PARSER = new ConstructingObjectParser<>(PAGE.getPreferredName(),
            a -> new PageParams(a[0] == null ? DEFAULT_FROM : (int) a[0], a[1] == null ? DEFAULT_SIZE : (int) a[1]));

@@ -39,6 +38,10 @@ public class PageParams implements ToXContentObject, Writeable {
    private final int from;
    private final int size;

+   public static PageParams defaultParams() {
+       return new PageParams(DEFAULT_FROM, DEFAULT_SIZE);
+   }
+
    public PageParams(StreamInput in) throws IOException {
        this(in.readVInt(), in.readVInt());
    }
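Editor's note — a one-line sketch (not part of the commit) of the fallback the calendar and filter transport actions now share: when a request carries no paging, the new `defaultParams()` (from=0, size=100) is used.

```java
import org.elasticsearch.xpack.ml.action.util.PageParams;

class PageParamsSketch {
    // Mirrors the null-check fallback used in GetCalendarsAction and GetFiltersAction.
    static PageParams orDefault(PageParams requested) {
        return requested == null ? PageParams.defaultParams() : requested;
    }
}
```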
@ -0,0 +1,139 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.calendars;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.job.config.MlFilter;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;

public class Calendar implements ToXContentObject, Writeable {

    public static final String CALENDAR_TYPE = "calendar";

    public static final ParseField TYPE = new ParseField("type");
    public static final ParseField ID = new ParseField("calendar_id");
    public static final ParseField JOB_IDS = new ParseField("job_ids");

    private static final String DOCUMENT_ID_PREFIX = "calendar_";

    // For QueryPage
    public static final ParseField RESULTS_FIELD = new ParseField("calendars");

    public static final ObjectParser<Builder, Void> PARSER =
            new ObjectParser<>(ID.getPreferredName(), Calendar.Builder::new);

    static {
        PARSER.declareString(Calendar.Builder::setId, ID);
        PARSER.declareStringArray(Calendar.Builder::setJobIds, JOB_IDS);
        PARSER.declareString((builder, s) -> {}, TYPE);
    }

    public static String documentId(String calendarId) {
        return DOCUMENT_ID_PREFIX + calendarId;
    }

    private final String id;
    private final List<String> jobIds;

    public Calendar(String id, List<String> jobIds) {
        this.id = Objects.requireNonNull(id, ID.getPreferredName() + " must not be null");
        this.jobIds = Objects.requireNonNull(jobIds, JOB_IDS.getPreferredName() + " must not be null");
    }

    public Calendar(StreamInput in) throws IOException {
        id = in.readString();
        jobIds = Arrays.asList(in.readStringArray());
    }

    public String getId() {
        return id;
    }

    public String documentId() {
        return documentId(id);
    }

    public List<String> getJobIds() {
        return new ArrayList<>(jobIds);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(id);
        out.writeStringArray(jobIds.toArray(new String[jobIds.size()]));
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(ID.getPreferredName(), id);
        builder.field(JOB_IDS.getPreferredName(), jobIds);
        if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) {
            builder.field(TYPE.getPreferredName(), CALENDAR_TYPE);
        }
        builder.endObject();
        return builder;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == this) {
            return true;
        }

        if (!(obj instanceof Calendar)) {
            return false;
        }

        Calendar other = (Calendar) obj;
        return id.equals(other.id) && jobIds.equals(other.jobIds);
    }

    @Override
    public int hashCode() {
        return Objects.hash(id, jobIds);
    }

    public static Builder builder() {
        return new Builder();
    }

    public static class Builder {

        private String calendarId;
        private List<String> jobIds = Collections.emptyList();

        public String getId() {
            return this.calendarId;
        }

        public void setId(String calendarId) {
            this.calendarId = calendarId;
        }

        public Builder setJobIds(List<String> jobIds) {
            this.jobIds = jobIds;
            return this;
        }

        public Calendar build() {
            return new Calendar(calendarId, jobIds);
        }
    }
}
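As a quick illustration of the new Calendar class, a hedged usage sketch (the calendar name and job ids are invented; only the constructor and methods shown in the new file are used):

    // Group two jobs under one calendar and derive the id of its document in the ML meta index.
    Calendar calendar = new Calendar("weekend-maintenance", Arrays.asList("job-1", "job-2"));
    String docId = calendar.documentId();   // "calendar_weekend-maintenance"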
@ -9,16 +9,19 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.ml.MlMetaIndex;
import org.elasticsearch.xpack.ml.job.config.Connective;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.Operator;
import org.elasticsearch.xpack.ml.job.config.RuleAction;
import org.elasticsearch.xpack.ml.job.config.RuleCondition;
import org.elasticsearch.xpack.ml.utils.Intervals;
import org.elasticsearch.xpack.ml.utils.time.TimeUtils;

import java.io.IOException;

@ -124,10 +127,27 @@ public class SpecialEvent implements ToXContentObject, Writeable {
return documentId(id);
}

public DetectionRule toDetectionRule() {
/**
 * Convert the special event to a detection rule.
 * The rule will have 2 time based conditions for the start and
 * end of the event.
 *
 * The rule's start and end times are aligned with the bucket span
 * so the start time is rounded down to a bucket interval and the
 * end time rounded up.
 *
 * @param bucketSpan Bucket span to align to
 * @return The event as a detection rule.
 */
public DetectionRule toDetectionRule(TimeValue bucketSpan) {
List<RuleCondition> conditions = new ArrayList<>();
conditions.add(RuleCondition.createTime(Operator.GTE, this.getStartTime().toEpochSecond()));
conditions.add(RuleCondition.createTime(Operator.LT, this.getEndTime().toEpochSecond()));

long bucketSpanSecs = bucketSpan.getSeconds();

long bucketStartTime = Intervals.alignToFloor(getStartTime().toEpochSecond(), bucketSpanSecs);
conditions.add(RuleCondition.createTime(Operator.GTE, bucketStartTime));
long bucketEndTime = Intervals.alignToCeil(getEndTime().toEpochSecond(), bucketSpanSecs);
conditions.add(RuleCondition.createTime(Operator.LT, bucketEndTime));

DetectionRule.Builder builder = new DetectionRule.Builder(conditions);
builder.setRuleAction(RuleAction.SKIP_SAMPLING_AND_FILTER_RESULTS);
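To make the bucket-span alignment described in the javadoc concrete, a small hedged example (the timestamps and the 15-minute bucket span are invented, and Intervals.alignToFloor/alignToCeil are assumed to round down/up to the nearest multiple of the interval, as the comment implies):

    // Event running from 600s to 3000s after a bucket boundary at epoch second 7200, bucket span 900s.
    long bucketSpanSecs = 900L;
    long ruleStart = Intervals.alignToFloor(7800L, bucketSpanSecs);   // 7200, rounded down to the bucket start
    long ruleEnd = Intervals.alignToCeil(10200L, bucketSpanSecs);     // 10800, rounded up to the next bucket boundary
    // The resulting rule therefore covers every bucket that overlaps the event.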
@ -152,7 +172,9 @@ public class SpecialEvent implements ToXContentObject, Writeable {
builder.dateField(START_TIME.getPreferredName(), START_TIME.getPreferredName() + "_string", startTime.toInstant().toEpochMilli());
builder.dateField(END_TIME.getPreferredName(), END_TIME.getPreferredName() + "_string", endTime.toInstant().toEpochMilli());
builder.field(JOB_IDS.getPreferredName(), jobIds);
builder.field(TYPE.getPreferredName(), SPECIAL_EVENT_TYPE);
if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) {
    builder.field(TYPE.getPreferredName(), SPECIAL_EVENT_TYPE);
}
builder.endObject();
return builder;
}
@ -31,6 +31,7 @@ import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.messages.Messages;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;
import org.elasticsearch.xpack.ml.utils.MlStrings;
import org.elasticsearch.xpack.ml.utils.ToXContentParams;
import org.elasticsearch.xpack.ml.utils.time.TimeUtils;

import java.io.IOException;

@ -81,6 +82,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields");
public static final ParseField SOURCE = new ParseField("_source");
public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config");
public static final ParseField HEADERS = new ParseField("headers");

// These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
public static final ObjectParser<Builder, Void> METADATA_PARSER = new ObjectParser<>("datafeed_config", true, Builder::new);

@ -117,6 +119,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
// TODO this is to read former _source field. Remove in v7.0.0
parser.declareBoolean((builder, value) -> {}, SOURCE);
parser.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSERS.get(parserType), CHUNKING_CONFIG);
parser.declareObject(Builder::setHeaders, (p, c) -> p.mapStrings(), HEADERS);
}
}

@ -140,10 +143,11 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
private final List<SearchSourceBuilder.ScriptField> scriptFields;
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private final Map<String, String> headers;

private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
        QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
        Integer scrollSize, ChunkingConfig chunkingConfig) {
        Integer scrollSize, ChunkingConfig chunkingConfig, Map<String, String> headers) {
this.id = id;
this.jobId = jobId;
this.queryDelay = queryDelay;

@ -155,6 +159,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
this.scriptFields = scriptFields;
this.scrollSize = scrollSize;
this.chunkingConfig = chunkingConfig;
this.headers = Objects.requireNonNull(headers);
}

public DatafeedConfig(StreamInput in) throws IOException {

@ -185,6 +190,11 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
    in.readBoolean();
}
this.chunkingConfig = in.readOptionalWriteable(ChunkingConfig::new);
if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
    this.headers = in.readMap(StreamInput::readString, StreamInput::readString);
} else {
    this.headers = Collections.emptyMap();
}
}

public String getId() {

@ -245,6 +255,10 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
return chunkingConfig;
}

public Map<String, String> getHeaders() {
    return headers;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);

@ -277,6 +291,9 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
    out.writeBoolean(false);
}
out.writeOptionalWriteable(chunkingConfig);
if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
    out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
}
}

@Override

@ -311,6 +328,10 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
if (chunkingConfig != null) {
    builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig);
}
if (headers != null && headers.isEmpty() == false
        && params.paramAsBoolean(ToXContentParams.FOR_CLUSTER_STATE, false) == true) {
    builder.field(HEADERS.getPreferredName(), headers);
}
return builder;
}

@ -341,13 +362,14 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
&& Objects.equals(this.scrollSize, that.scrollSize)
&& Objects.equals(this.aggregations, that.aggregations)
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig);
&& Objects.equals(this.chunkingConfig, that.chunkingConfig)
&& Objects.equals(this.headers, that.headers);
}

@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields,
        chunkingConfig);
        chunkingConfig, headers);
}

@Override

@ -420,6 +442,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
private List<SearchSourceBuilder.ScriptField> scriptFields;
private Integer scrollSize = DEFAULT_SCROLL_SIZE;
private ChunkingConfig chunkingConfig;
private Map<String, String> headers = Collections.emptyMap();

public Builder() {
}

@ -442,6 +465,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
this.scriptFields = config.scriptFields;
this.scrollSize = config.scrollSize;
this.chunkingConfig = config.chunkingConfig;
this.headers = config.headers;
}

public void setId(String datafeedId) {

@ -452,6 +476,10 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
    this.jobId = ExceptionsHelper.requireNonNull(jobId, Job.ID.getPreferredName());
}

public void setHeaders(Map<String, String> headers) {
    this.headers = headers;
}

public void setIndices(List<String> indices) {
    this.indices = ExceptionsHelper.requireNonNull(indices, INDICES.getPreferredName());
}

@ -516,7 +544,7 @@ public class DatafeedConfig extends AbstractDiffable<DatafeedConfig> implements
setDefaultChunkingConfig();
setDefaultQueryDelay();
return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize,
        chunkingConfig);
        chunkingConfig, headers);
}

void validateAggregations() {
@ -25,9 +25,6 @@ import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Supplier;

import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.ClientHelper.clientWithOrigin;

public class DatafeedJobBuilder {

private final Client client;

@ -36,7 +33,7 @@ public class DatafeedJobBuilder {
private final Supplier<Long> currentTimeSupplier;

public DatafeedJobBuilder(Client client, JobProvider jobProvider, Auditor auditor, Supplier<Long> currentTimeSupplier) {
this.client = clientWithOrigin(client, ML_ORIGIN);
this.client = client;
this.jobProvider = Objects.requireNonNull(jobProvider);
this.auditor = Objects.requireNonNull(auditor);
this.currentTimeSupplier = Objects.requireNonNull(currentTimeSupplier);
@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.MachineLearning;

@ -463,7 +464,16 @@ public class DatafeedManager extends AbstractComponent {
}

private void runTask(StartDatafeedAction.DatafeedTask task) {
innerRun(runningDatafeedsOnThisNode.get(task.getAllocationId()), task.getDatafeedStartTime(), task.getEndTime());
// This clearing of the thread context is not strictly necessary. Every action performed by the
// datafeed _should_ be done using the MlClientHelper, which will set the appropriate thread
// context. However, by clearing the thread context here if anyone forgets to use MlClientHelper
// somewhere else in the datafeed code then it should cause a failure in the same way in single
// and multi node clusters. If we didn't clear the thread context here then there's a risk that
// a context with sufficient permissions would coincidentally be in force in some single node
// tests, leading to bugs not caught in CI due to many tests running in single node test clusters.
try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) {
    innerRun(runningDatafeedsOnThisNode.get(task.getAllocationId()), task.getDatafeedStartTime(), task.getEndTime());
}
}

@Override
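The comment above leans on the convention that every call a datafeed makes goes through MlClientHelper. A hedged sketch of that call pattern, reusing only the execute overload that appears later in this commit (the search itself is a placeholder):

    // Run a search on behalf of a datafeed, re-applying the security headers stored in its config.
    SearchResponse response = MlClientHelper.execute(datafeed.getHeaders(), client,
            () -> client.prepareSearch(datafeed.getIndices().toArray(new String[0])).get());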
@ -89,6 +89,12 @@ public class DatafeedNodeSelector {
private AssignmentFailure verifyIndicesActive(DatafeedConfig datafeed) {
List<String> indices = datafeed.getIndices();
for (String index : indices) {

    if (isRemoteIndex(index)) {
        // We cannot verify remote indices
        continue;
    }

    String[] concreteIndices;
    String reason = "cannot start datafeed [" + datafeed.getId() + "] because index ["
            + index + "] does not exist, is closed, or is still initializing.";

@ -115,6 +121,10 @@ public class DatafeedNodeSelector {
    return null;
}

private boolean isRemoteIndex(String index) {
    return index.indexOf(':') != -1;
}

private static class AssignmentFailure {
    private final String reason;
    private final boolean isCriticalForTaskCreation;
@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

@ -20,6 +21,7 @@ import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.xpack.ml.MlClientHelper;
import org.elasticsearch.xpack.ml.datafeed.extractor.ExtractorUtils;
import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.utils.ExceptionsHelper;

@ -29,7 +31,9 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;

/**
 * A datafeed update contains partial properties to update a {@link DatafeedConfig}.

@ -260,7 +264,7 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
 * Applies the update to the given {@link DatafeedConfig}
 * @return a new {@link DatafeedConfig} that contains the update
 */
public DatafeedConfig apply(DatafeedConfig datafeedConfig) {
public DatafeedConfig apply(DatafeedConfig datafeedConfig, ThreadContext threadContext) {
if (id.equals(datafeedConfig.getId()) == false) {
    throw new IllegalArgumentException("Cannot apply update to datafeedConfig with different id");
}

@ -296,6 +300,15 @@ public class DatafeedUpdate implements Writeable, ToXContentObject {
if (chunkingConfig != null) {
    builder.setChunkingConfig(chunkingConfig);
}

if (threadContext != null) {
    // Adjust the request, adding security headers from the current thread context
    Map<String, String> headers = threadContext.getHeaders().entrySet().stream()
            .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    builder.setHeaders(headers);
}

return builder.build();
}
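The update above copies only security-related headers out of the thread context. A hedged sketch of the effect, assuming MlClientHelper.SECURITY_HEADER_FILTERS holds the names of the authentication headers (the concrete header names below are invented for illustration):

    Map<String, String> contextHeaders = new HashMap<>();
    contextHeaders.put("_xpack_security_authentication", "<signed auth token>");   // assumed to be listed in SECURITY_HEADER_FILTERS
    contextHeaders.put("X-Opaque-Id", "my-request");                               // unrelated header, filtered out
    Map<String, String> kept = contextHeaders.entrySet().stream()
            .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey()))
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    // kept now holds only the security headers, which is what ends up in DatafeedConfig.getHeaders().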
@ -14,6 +14,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.xpack.ml.MlClientHelper;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
import org.elasticsearch.xpack.ml.datafeed.extractor.ExtractorUtils;

@ -111,7 +112,7 @@ class AggregationDataExtractor implements DataExtractor {
}

protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
return searchRequestBuilder.get();
return MlClientHelper.execute(context.headers, client, searchRequestBuilder::get);
}

private SearchRequestBuilder buildSearchRequest() {
@ -9,6 +9,7 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;

import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

@ -24,9 +25,11 @@ class AggregationDataExtractorContext {
final long start;
final long end;
final boolean includeDocCount;
final Map<String, String> headers;

AggregationDataExtractorContext(String jobId, String timeField, Set<String> fields, List<String> indices, List<String> types,
        QueryBuilder query, AggregatorFactories.Builder aggs, long start, long end, boolean includeDocCount) {
        QueryBuilder query, AggregatorFactories.Builder aggs, long start, long end, boolean includeDocCount,
        Map<String, String> headers) {
this.jobId = Objects.requireNonNull(jobId);
this.timeField = Objects.requireNonNull(timeField);
this.fields = Objects.requireNonNull(fields);

@ -37,5 +40,6 @@ class AggregationDataExtractorContext {
this.start = start;
this.end = end;
this.includeDocCount = includeDocCount;
this.headers = headers;
}
}
@ -39,7 +39,8 @@ public class AggregationDataExtractorFactory implements DataExtractorFactory {
datafeedConfig.getAggregations(),
Intervals.alignToCeil(start, histogramInterval),
Intervals.alignToFloor(end, histogramInterval),
job.getAnalysisConfig().getSummaryCountFieldName().equals(DatafeedConfig.DOC_COUNT));
job.getAnalysisConfig().getSummaryCountFieldName().equals(DatafeedConfig.DOC_COUNT),
datafeedConfig.getHeaders());
return new AggregationDataExtractor(client, dataExtractorContext);
}
}
@ -15,6 +15,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.metrics.max.Max;
import org.elasticsearch.search.aggregations.metrics.min.Min;
import org.elasticsearch.xpack.ml.MlClientHelper;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory;
import org.elasticsearch.xpack.ml.datafeed.extractor.ExtractorUtils;

@ -133,7 +134,7 @@ public class ChunkedDataExtractor implements DataExtractor {
}

protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
return searchRequestBuilder.get();
return MlClientHelper.execute(context.headers, client, searchRequestBuilder::get);
}

private Optional<InputStream> getNextStream() throws IOException {
@ -10,6 +10,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;

import java.util.List;
import java.util.Map;
import java.util.Objects;

class ChunkedDataExtractorContext {

@ -29,10 +30,11 @@ class ChunkedDataExtractorContext {
final long end;
final TimeValue chunkSpan;
final TimeAligner timeAligner;
final Map<String, String> headers;

ChunkedDataExtractorContext(String jobId, String timeField, List<String> indices, List<String> types,
        QueryBuilder query, int scrollSize, long start, long end, @Nullable TimeValue chunkSpan,
        TimeAligner timeAligner) {
        TimeAligner timeAligner, Map<String, String> headers) {
this.jobId = Objects.requireNonNull(jobId);
this.timeField = Objects.requireNonNull(timeField);
this.indices = indices.toArray(new String[indices.size()]);

@ -43,5 +45,6 @@ class ChunkedDataExtractorContext {
this.end = end;
this.chunkSpan = chunkSpan;
this.timeAligner = Objects.requireNonNull(timeAligner);
this.headers = headers;
}
}
@ -41,7 +41,8 @@ public class ChunkedDataExtractorFactory implements DataExtractorFactory {
timeAligner.alignToCeil(start),
timeAligner.alignToFloor(end),
datafeedConfig.getChunkingConfig().getTimeSpan(),
timeAligner);
timeAligner,
datafeedConfig.getHeaders());
return new ChunkedDataExtractor(client, dataExtractorFactory, dataExtractorContext);
}
@ -20,6 +20,7 @@ import org.elasticsearch.script.Script;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.StoredFieldsContext;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.xpack.ml.MlClientHelper;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
import org.elasticsearch.xpack.ml.datafeed.extractor.ExtractorUtils;
import org.elasticsearch.xpack.ml.utils.DomainSplitFunction;

@ -98,7 +99,7 @@ class ScrollDataExtractor implements DataExtractor {
}

protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) {
return searchRequestBuilder.get();
return MlClientHelper.execute(context.headers, client, searchRequestBuilder::get);
}

private SearchRequestBuilder buildSearchRequest(long start) {

@ -182,7 +183,7 @@ class ScrollDataExtractor implements DataExtractor {

private InputStream continueScroll() throws IOException {
LOGGER.debug("[{}] Continuing scroll with id [{}]", context.jobId, scrollId);
SearchResponse searchResponse = null;
SearchResponse searchResponse;
try {
    searchResponse = executeSearchScrollRequest(scrollId);
} catch (SearchPhaseExecutionException searchExecutionException) {

@ -208,10 +209,10 @@ class ScrollDataExtractor implements DataExtractor {
}

protected SearchResponse executeSearchScrollRequest(String scrollId) {
return SearchScrollAction.INSTANCE.newRequestBuilder(client)
return MlClientHelper.execute(context.headers, client, () -> SearchScrollAction.INSTANCE.newRequestBuilder(client)
        .setScroll(SCROLL_TIMEOUT)
        .setScrollId(scrollId)
        .get();
        .get());
}

private void resetScroll() {

@ -223,7 +224,7 @@ class ScrollDataExtractor implements DataExtractor {
if (scrollId != null) {
    ClearScrollRequest request = new ClearScrollRequest();
    request.addScrollId(scrollId);
    client.execute(ClearScrollAction.INSTANCE, request).actionGet();
    MlClientHelper.execute(context.headers, client, () -> client.execute(ClearScrollAction.INSTANCE, request).actionGet());
}
}
}
@ -9,6 +9,7 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.util.List;
import java.util.Map;
import java.util.Objects;

class ScrollDataExtractorContext {

@ -22,10 +23,11 @@ class ScrollDataExtractorContext {
final int scrollSize;
final long start;
final long end;
final Map<String, String> headers;

ScrollDataExtractorContext(String jobId, ExtractedFields extractedFields, List<String> indices, List<String> types,
        QueryBuilder query, List<SearchSourceBuilder.ScriptField> scriptFields, int scrollSize,
        long start, long end) {
        long start, long end, Map<String, String> headers) {
this.jobId = Objects.requireNonNull(jobId);
this.extractedFields = Objects.requireNonNull(extractedFields);
this.indices = indices.toArray(new String[indices.size()]);

@ -35,5 +37,6 @@ class ScrollDataExtractorContext {
this.scrollSize = scrollSize;
this.start = start;
this.end = end;
this.headers = headers;
}
}
@ -11,8 +11,8 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesAction;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.xpack.ml.MlClientHelper;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractor;
import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory;

@ -46,7 +46,8 @@ public class ScrollDataExtractorFactory implements DataExtractorFactory {
datafeedConfig.getScriptFields(),
datafeedConfig.getScrollSize(),
start,
end);
end,
datafeedConfig.getHeaders());
return new ScrollDataExtractor(client, dataExtractorContext);
}

@ -74,6 +75,10 @@ public class ScrollDataExtractorFactory implements DataExtractorFactory {
// multi-fields that are not in source.
String[] requestFields = job.allFields().stream().map(f -> MlStrings.getParentField(f) + "*").toArray(size -> new String[size]);
fieldCapabilitiesRequest.fields(requestFields);
client.execute(FieldCapabilitiesAction.INSTANCE, fieldCapabilitiesRequest, fieldCapabilitiesHandler);
MlClientHelper.<FieldCapabilitiesResponse>execute(datafeed, client, () -> {
    client.execute(FieldCapabilitiesAction.INSTANCE, fieldCapabilitiesRequest, fieldCapabilitiesHandler);
    // This response gets discarded - the listener handles the real response
    return null;
});
}
}
@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.ml.MlMetaIndex;

import java.io.IOException;
import java.util.ArrayList;

@ -25,7 +26,6 @@ public class MlFilter implements ToXContentObject, Writeable {

public static final String DOCUMENT_ID_PREFIX = "filter_";

public static final String INCLUDE_TYPE_KEY = "include_type";
public static final String FILTER_TYPE = "filter";

public static final ParseField TYPE = new ParseField("type");

@ -67,7 +67,7 @@ public class MlFilter implements ToXContentObject, Writeable {
builder.startObject();
builder.field(ID.getPreferredName(), id);
builder.field(ITEMS.getPreferredName(), items);
if (params.paramAsBoolean(INCLUDE_TYPE_KEY, false)) {
if (params.paramAsBoolean(MlMetaIndex.INCLUDE_TYPE_KEY, false)) {
    builder.field(TYPE.getPreferredName(), FILTER_TYPE);
}
builder.endObject();
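The same MlMetaIndex.INCLUDE_TYPE_KEY switch is now shared by MlFilter, Calendar and SpecialEvent. A hedged sketch of how a caller opts in to the type field when writing to the shared meta index (ToXContent.MapParams is the standard Elasticsearch helper; the filter and builder variables are assumed to exist):

    // Ask for the "type" discriminator to be written alongside the filter fields.
    ToXContent.Params params = new ToXContent.MapParams(
            Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true"));
    filter.toXContent(builder, params);   // now also emits "type": "filter"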
@ -16,9 +16,9 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.ml.calendars.SpecialEvent;
import org.elasticsearch.xpack.ml.job.config.DataDescription;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;
import org.elasticsearch.xpack.ml.job.process.CountingInputStream;
import org.elasticsearch.xpack.ml.job.process.DataCountsReporter;

@ -39,6 +39,8 @@ import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;

@ -49,6 +51,7 @@ import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;

public class AutodetectCommunicator implements Closeable {

@ -192,17 +195,37 @@ public class AutodetectCommunicator implements Closeable {
autodetectProcess.writeUpdateModelPlotMessage(updateParams.getModelPlotConfig());
}

List<DetectionRule> eventsAsRules = Collections.emptyList();
if (specialEvents.isEmpty() == false) {
    eventsAsRules = specialEvents.stream()
            .map(e -> e.toDetectionRule(job.getAnalysisConfig().getBucketSpan()))
            .collect(Collectors.toList());
}

// All detection rules for a detector must be updated together as the update
// wipes any previously set rules.
// Build a single list of rules for special events and detection rules.
List<List<DetectionRule>> rules = new ArrayList<>(job.getAnalysisConfig().getDetectors().size());
for (int i = 0; i < job.getAnalysisConfig().getDetectors().size(); i++) {
    List<DetectionRule> detectorRules = new ArrayList<>(eventsAsRules);
    rules.add(detectorRules);
}

// Add detector rules
if (updateParams.getDetectorUpdates() != null) {
    for (JobUpdate.DetectorUpdate update : updateParams.getDetectorUpdates()) {
        if (update.getRules() != null) {
            autodetectProcess.writeUpdateDetectorRulesMessage(update.getDetectorIndex(), update.getRules());
            rules.get(update.getDetectorIndex()).addAll(update.getRules());
        }
    }
}

if (updateParams.isUpdateSpecialEvents()) {
autodetectProcess.writeUpdateSpecialEventsMessage(job.getAnalysisConfig().getDetectors().size(), specialEvents);
for (int i = 0; i < job.getAnalysisConfig().getDetectors().size(); i++) {
    if (!rules.get(i).isEmpty()) {
        autodetectProcess.writeUpdateDetectorRulesMessage(i, rules.get(i));
    }
}

return null;
}, handler);
}
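A small hedged illustration of the merging logic above, assuming a job with two detectors, one special event converted to eventRule and one detector update carrying customRule (both rule variables stand for DetectionRule instances built elsewhere):

    List<DetectionRule> eventsAsRules = Collections.singletonList(eventRule);
    List<List<DetectionRule>> rules = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
        rules.add(new ArrayList<>(eventsAsRules));      // every detector starts with the special-event rule
    }
    rules.get(1).add(customRule);                       // detector 1 also keeps the rule from the job update
    // rules is now [[eventRule], [eventRule, customRule]]; one writeUpdateDetectorRulesMessage(i, rules.get(i))
    // call per detector then replaces whatever rules that detector had before.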
@ -5,7 +5,6 @@
 */
package org.elasticsearch.xpack.ml.job.process.autodetect;

import org.elasticsearch.xpack.ml.calendars.SpecialEvent;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;

@ -75,17 +74,6 @@ public interface AutodetectProcess extends Closeable {
void writeUpdateDetectorRulesMessage(int detectorIndex, List<DetectionRule> rules)
        throws IOException;

/**
 * Write the updated special events overwriting any previous events.
 * Writing an empty list of special events removes any previously set events.
 *
 * @param numberOfDetectors The number of detectors in the job. All will be
 *                          updated with the special events
 * @param specialEvents List of events to update
 * @throws IOException If the write fails
 */
void writeUpdateSpecialEventsMessage(int numberOfDetectors, List<SpecialEvent> specialEvents) throws IOException;

/**
 * Flush the job pushing any stale data into autodetect.
 * Every flush command generates a unique flush Id which will be output
@ -5,7 +5,6 @@
 */
package org.elasticsearch.xpack.ml.job.process.autodetect;

import org.elasticsearch.xpack.ml.calendars.SpecialEvent;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;

@ -72,10 +71,6 @@ public class BlackHoleAutodetectProcess implements AutodetectProcess {
public void writeUpdateDetectorRulesMessage(int detectorIndex, List<DetectionRule> rules) throws IOException {
}

@Override
public void writeUpdateSpecialEventsMessage(int numberOfDetectors, List<SpecialEvent> specialEvents) throws IOException {
}

/**
 * Accept the request do nothing with it but write the flush acknowledgement to {@link #readAutodetectResults()}
 * @param params Should interim results be generated
@ -9,7 +9,6 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.calendars.SpecialEvent;
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
import org.elasticsearch.xpack.ml.job.config.ModelPlotConfig;
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;

@ -42,7 +41,6 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;

/**
 * Autodetect process using native code.

@ -161,16 +159,6 @@ class NativeAutodetectProcess implements AutodetectProcess {
writer.writeUpdateDetectorRulesMessage(detectorIndex, rules);
}

@Override
public void writeUpdateSpecialEventsMessage(int numberOfEvents, List<SpecialEvent> specialEvents) throws IOException {
    ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfAnalysisFields);

    List<DetectionRule> eventsAsRules = specialEvents.stream().map(SpecialEvent::toDetectionRule).collect(Collectors.toList());
    for (int i = 0; i < numberOfEvents; i++) {
        writer.writeUpdateDetectorRulesMessage(i, eventsAsRules);
    }
}

@Override
public String flushJob(FlushJobParams params) throws IOException {
    ControlMsgToProcessWriter writer = new ControlMsgToProcessWriter(recordWriter, numberOfAnalysisFields);
@ -79,8 +79,8 @@ public class FieldConfigWriter {

private void writeDetectors(StringBuilder contents) throws IOException {
int counter = 0;

List<DetectionRule> events = specialEvents.stream().map(SpecialEvent::toDetectionRule).collect(Collectors.toList());
List<DetectionRule> events = specialEvents.stream().map(e -> e.toDetectionRule(config.getBucketSpan()))
        .collect(Collectors.toList());

for (Detector detector : config.getDetectors()) {
    int detectorId = counter++;
@ -0,0 +1,39 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.rest.calendar;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.AcknowledgedRestListener;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.action.DeleteCalendarAction;
import org.elasticsearch.xpack.ml.calendars.Calendar;

import java.io.IOException;

public class RestDeleteCalendarAction extends BaseRestHandler {

    public RestDeleteCalendarAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(RestRequest.Method.DELETE,
                MachineLearning.BASE_PATH + "calendars/{" + Calendar.ID.getPreferredName() + "}", this);
    }

    @Override
    public String getName() {
        return "xpack_ml_delete_calendar_action";
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
        DeleteCalendarAction.Request request = new DeleteCalendarAction.Request(restRequest.param(Calendar.ID.getPreferredName()));
        return channel -> client.execute(DeleteCalendarAction.INSTANCE, request, new AcknowledgedRestListener<>(channel));
    }

}
@ -0,0 +1,51 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.rest.calendar;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestStatusToXContentListener;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.action.GetCalendarsAction;
import org.elasticsearch.xpack.ml.action.util.PageParams;
import org.elasticsearch.xpack.ml.calendars.Calendar;

import java.io.IOException;

public class RestGetCalendarsAction extends BaseRestHandler {

    public RestGetCalendarsAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(RestRequest.Method.GET, MachineLearning.BASE_PATH + "calendars/{" + Calendar.ID.getPreferredName() + "}",
                this);
        controller.registerHandler(RestRequest.Method.GET, MachineLearning.BASE_PATH + "calendars/", this);
    }

    @Override
    public String getName() {
        return "xpack_ml_get_calendars_action";
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
        GetCalendarsAction.Request getRequest = new GetCalendarsAction.Request();
        String calendarId = restRequest.param(Calendar.ID.getPreferredName());
        if (!Strings.isNullOrEmpty(calendarId)) {
            getRequest.setCalendarId(calendarId);
        }

        if (restRequest.hasParam(PageParams.FROM.getPreferredName()) || restRequest.hasParam(PageParams.SIZE.getPreferredName())) {
            getRequest.setPageParams(new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM),
                    restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE)));
        }

        return channel -> client.execute(GetCalendarsAction.INSTANCE, getRequest, new RestStatusToXContentListener<>(channel));
    }
}
@ -0,0 +1,51 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.ml.rest.calendar;

import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;
import org.elasticsearch.xpack.ml.MachineLearning;
import org.elasticsearch.xpack.ml.action.PutCalendarAction;
import org.elasticsearch.xpack.ml.calendars.Calendar;

import java.io.IOException;
import java.util.Collections;

public class RestPutCalendarAction extends BaseRestHandler {

    public RestPutCalendarAction(Settings settings, RestController controller) {
        super(settings);
        controller.registerHandler(RestRequest.Method.PUT,
                MachineLearning.BASE_PATH + "calendars/{" + Calendar.ID.getPreferredName() + "}", this);
    }

    @Override
    public String getName() {
        return "xpack_ml_put_calendar_action";
    }

    @Override
    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
        String calendarId = restRequest.param(Calendar.ID.getPreferredName());

        PutCalendarAction.Request putCalendarRequest;
        // A calendar can be created with just a name or with an optional body
        if (restRequest.hasContentOrSourceParam()) {
            XContentParser parser = restRequest.contentOrSourceParamParser();
            putCalendarRequest = PutCalendarAction.Request.parseRequest(calendarId, parser);
        } else {
            putCalendarRequest = new PutCalendarAction.Request(new Calendar(calendarId, Collections.emptyList()));
        }

        return channel -> client.execute(PutCalendarAction.INSTANCE, putCalendarRequest, new RestToXContentListener<>(channel));
    }
}
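For a sense of how the three new handlers are used, a hedged sketch of the transport-level request behind a PUT with no body (the calendar id is invented, the response type name is assumed, and the URL in the comment assumes the usual _xpack/ml base path):

    // Equivalent of PUT _xpack/ml/calendars/holidays with no body: a calendar with no jobs attached yet.
    PutCalendarAction.Request request =
            new PutCalendarAction.Request(new Calendar("holidays", Collections.emptyList()));
    PutCalendarAction.Response response = client.execute(PutCalendarAction.INSTANCE, request).actionGet();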
@ -40,10 +40,9 @@ public class RestGetFiltersAction extends BaseRestHandler {
if (!Strings.isNullOrEmpty(filterId)) {
    getListRequest.setFilterId(filterId);
}
if (restRequest.hasParam(PageParams.FROM.getPreferredName())
        || restRequest.hasParam(PageParams.SIZE.getPreferredName())
        || Strings.isNullOrEmpty(filterId)) {
    getListRequest.setPageParams(new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM),
if (restRequest.hasParam(PageParams.FROM.getPreferredName()) || restRequest.hasParam(PageParams.SIZE.getPreferredName())) {
    getListRequest.setPageParams(
            new PageParams(restRequest.paramAsInt(PageParams.FROM.getPreferredName(), PageParams.DEFAULT_FROM),
            restRequest.paramAsInt(PageParams.SIZE.getPreferredName(), PageParams.DEFAULT_SIZE)));
}
return channel -> client.execute(GetFiltersAction.INSTANCE, getListRequest, new RestStatusToXContentListener<>(channel));
@ -161,7 +161,7 @@ public class Monitoring implements ActionPlugin {
collectors.add(new IndexRecoveryCollector(settings, clusterService, licenseState, client));
collectors.add(new JobStatsCollector(settings, clusterService, licenseState, client));

final MonitoringService monitoringService = new MonitoringService(settings, clusterSettings, threadPool, collectors, exporters);
final MonitoringService monitoringService = new MonitoringService(settings, clusterService, threadPool, collectors, exporters);

return Arrays.asList(monitoringService, exporters, cleanerService);
}
@ -8,6 +8,8 @@ package org.elasticsearch.xpack.monitoring;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;

@ -61,6 +63,7 @@ public class MonitoringService extends AbstractLifecycleComponent {
/** Task in charge of collecting and exporting monitoring data **/
private final MonitoringExecution monitor = new MonitoringExecution();

private final ClusterService clusterService;
private final ThreadPool threadPool;
private final Set<Collector> collectors;
private final Exporters exporters;

@ -68,14 +71,15 @@ public class MonitoringService extends AbstractLifecycleComponent {
private volatile TimeValue interval;
private volatile ThreadPool.Cancellable scheduler;

MonitoringService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool,
MonitoringService(Settings settings, ClusterService clusterService, ThreadPool threadPool,
        Set<Collector> collectors, Exporters exporters) {
super(settings);
this.clusterService = Objects.requireNonNull(clusterService);
this.threadPool = Objects.requireNonNull(threadPool);
this.collectors = Objects.requireNonNull(collectors);
this.exporters = Objects.requireNonNull(exporters);
this.interval = INTERVAL.get(settings);
clusterSettings.addSettingsUpdateConsumer(INTERVAL, this::setInterval);
clusterService.getClusterSettings().addSettingsUpdateConsumer(INTERVAL, this::setInterval);
}

void setInterval(TimeValue interval) {

@ -191,6 +195,8 @@ public class MonitoringService extends AbstractLifecycleComponent {
@Override
protected void doRun() throws Exception {
final long timestamp = System.currentTimeMillis();
final long intervalInMillis = interval.getMillis();
final ClusterState clusterState = clusterService.state();

final Collection<MonitoringDoc> results = new ArrayList<>();
for (Collector collector : collectors) {

@ -201,7 +207,7 @@ public class MonitoringService extends AbstractLifecycleComponent {
}

try {
    Collection<MonitoringDoc> result = collector.collect(timestamp, interval.getMillis());
    Collection<MonitoringDoc> result = collector.collect(timestamp, intervalInMillis, clusterState);
    if (result != null) {
        results.addAll(result);
    }
@ -8,6 +8,7 @@ package org.elasticsearch.xpack.monitoring.collector;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;

@ -68,8 +69,10 @@ public abstract class Collector extends AbstractComponent {

/**
 * Indicates if the current collector is allowed to collect data
 *
 * @param isElectedMaster true if the current local node is the elected master node
 */
protected boolean shouldCollect() {
protected boolean shouldCollect(final boolean isElectedMaster) {
if (licenseState.isMonitoringAllowed() == false) {
    logger.trace("collector [{}] can not collect data due to invalid license", name());
    return false;

@ -77,15 +80,12 @@ public abstract class Collector extends AbstractComponent {
    return true;
}

protected boolean isLocalNodeMaster() {
    return clusterService.state().nodes().isLocalNodeElectedMaster();
}

public Collection<MonitoringDoc> collect(final long timestamp, final long interval) {
public Collection<MonitoringDoc> collect(final long timestamp, final long interval, final ClusterState clusterState) {
try {
    if (shouldCollect()) {
    final boolean isElectedMaster = clusterState.getNodes().isLocalNodeElectedMaster();
    if (shouldCollect(isElectedMaster)) {
        logger.trace("collector [{}] - collecting data...", name());
        return doCollect(convertNode(timestamp, clusterService.localNode()), interval);
        return doCollect(convertNode(timestamp, clusterService.localNode()), interval, clusterState);
    }
} catch (ElasticsearchTimeoutException e) {
    logger.error((Supplier<?>) () -> new ParameterizedMessage("collector [{}] timed out when collecting data", name()));

@ -95,11 +95,9 @@ public abstract class Collector extends AbstractComponent {
    return null;
}

protected abstract Collection<MonitoringDoc> doCollect(MonitoringDoc.Node sourceNode, long interval) throws Exception;

protected String clusterUUID() {
    return clusterService.state().metaData().clusterUUID();
}
protected abstract Collection<MonitoringDoc> doCollect(MonitoringDoc.Node node,
                                                       long interval,
                                                       ClusterState clusterState) throws Exception;

/**
 * Returns a timestamp to use in {@link MonitoringDoc}

@ -110,6 +108,16 @@ public abstract class Collector extends AbstractComponent {
    return System.currentTimeMillis();
}

/**
 * Extracts the current cluster's UUID from a {@link ClusterState}
 *
 * @param clusterState the {@link ClusterState}
 * @return the cluster's UUID
 */
protected static String clusterUuid(final ClusterState clusterState) {
    return clusterState.metaData().clusterUUID();
}

/**
 * Returns the value of the collection timeout configured for the current {@link Collector}.
 *
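To summarise the new collector contract that the classes below implement, a hedged sketch of the two methods a subclass now overrides (the collector is hypothetical and its constructor, which would simply delegate to the Collector constructor, is omitted):

    // Hypothetical collector: runs only on the elected master and no longer calls clusterService.state().
    @Override
    protected boolean shouldCollect(final boolean isElectedMaster) {
        return isElectedMaster && super.shouldCollect(isElectedMaster);
    }

    @Override
    protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node,
                                                  final long interval,
                                                  final ClusterState clusterState) throws Exception {
        final String clusterUuid = clusterUuid(clusterState);   // taken from the passed-in state, not a fresh lookup
        return Collections.emptyList();                         // a real collector would build MonitoringDoc instances here
    }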
@ -81,13 +81,15 @@ public class ClusterStatsCollector extends Collector {
}

@Override
protected boolean shouldCollect() {
protected boolean shouldCollect(final boolean isElectedMaster) {
// This collector can always collect data on the master node
return isLocalNodeMaster();
return isElectedMaster;
}

@Override
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node, final long interval) throws Exception {
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node,
                                              final long interval,
                                              final ClusterState clusterState) throws Exception {
final Supplier<ClusterStatsResponse> clusterStatsSupplier =
        () -> client.admin().cluster().prepareClusterStats().get(getCollectionTimeout());
final Supplier<List<XPackFeatureSet.Usage>> usageSupplier =

@ -96,8 +98,8 @@ public class ClusterStatsCollector extends Collector {
final ClusterStatsResponse clusterStats = clusterStatsSupplier.get();

final String clusterName = clusterService.getClusterName().value();
final String clusterUuid = clusterUuid(clusterState);
final String version = Version.CURRENT.toString();
final ClusterState clusterState = clusterService.state();
final License license = licenseService.getLicense();
final List<XPackFeatureSet.Usage> xpackUsage = collect(usageSupplier);
final boolean apmIndicesExist = doAPMIndicesExist(clusterState);

@ -108,7 +110,7 @@ public class ClusterStatsCollector extends Collector {

// Adds a cluster stats document
return Collections.singleton(
        new ClusterStatsMonitoringDoc(clusterUUID(), timestamp(), interval, node, clusterName, version, clusterStats.getStatus(),
        new ClusterStatsMonitoringDoc(clusterUuid, timestamp(), interval, node, clusterName, version, clusterStats.getStatus(),
                license, apmIndicesExist, xpackUsage, clusterStats, clusterState, clusterNeedsTLSEnabled));
}
@ -8,6 +8,7 @@ package org.elasticsearch.xpack.monitoring.collector.indices;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

@ -59,12 +60,14 @@ public class IndexRecoveryCollector extends Collector {
}

@Override
protected boolean shouldCollect() {
    return super.shouldCollect() && isLocalNodeMaster();
protected boolean shouldCollect(final boolean isElectedMaster) {
    return isElectedMaster && super.shouldCollect(isElectedMaster);
}

@Override
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node, final long interval) throws Exception {
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node,
                                              final long interval,
                                              final ClusterState clusterState) throws Exception {
List<MonitoringDoc> results = new ArrayList<>(1);
RecoveryResponse recoveryResponse = client.admin().indices().prepareRecoveries()
        .setIndices(getCollectionIndices())

@ -73,7 +76,8 @@ public class IndexRecoveryCollector extends Collector {
        .get(getCollectionTimeout());

if (recoveryResponse.hasRecoveries()) {
    results.add(new IndexRecoveryMonitoringDoc(clusterUUID(), timestamp(), interval, node, recoveryResponse));
    final String clusterUuid = clusterUuid(clusterState);
    results.add(new IndexRecoveryMonitoringDoc(clusterUuid, timestamp(), interval, node, recoveryResponse));
}
return Collections.unmodifiableCollection(results);
}
@ -49,12 +49,14 @@ public class IndexStatsCollector extends Collector {
}

@Override
protected boolean shouldCollect() {
return super.shouldCollect() && isLocalNodeMaster();
protected boolean shouldCollect(final boolean isElectedMaster) {
return isElectedMaster && super.shouldCollect(isElectedMaster);
}

@Override
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node, final long interval) throws Exception {
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node,
final long interval,
final ClusterState clusterState) throws Exception {
final List<MonitoringDoc> results = new ArrayList<>();
final IndicesStatsResponse indicesStats = client.admin().indices().prepareStats()
.setIndices(getCollectionIndices())
@ -73,8 +75,7 @@ public class IndexStatsCollector extends Collector {
.get(getCollectionTimeout());

final long timestamp = timestamp();
final String clusterUuid = clusterUUID();
final ClusterState clusterState = clusterService.state();
final String clusterUuid = clusterUuid(clusterState);

// add the indices stats that we use to collect the index stats
results.add(new IndicesStatsMonitoringDoc(clusterUuid, timestamp, interval, node, indicesStats));
@ -6,6 +6,7 @@
package org.elasticsearch.xpack.monitoring.collector.ml;

import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Setting;
@ -57,15 +58,18 @@ public class JobStatsCollector extends Collector {
}

@Override
protected boolean shouldCollect() {
protected boolean shouldCollect(final boolean isElectedMaster) {
// This can only run when monitoring is allowed + ML is enabled/allowed, but also only on the elected master node
return super.shouldCollect() &&
XPackSettings.MACHINE_LEARNING_ENABLED.get(settings) && licenseState.isMachineLearningAllowed() &&
isLocalNodeMaster();
return isElectedMaster
&& super.shouldCollect(isElectedMaster)
&& XPackSettings.MACHINE_LEARNING_ENABLED.get(settings)
&& licenseState.isMachineLearningAllowed();
}

@Override
protected List<MonitoringDoc> doCollect(final MonitoringDoc.Node node, final long interval) throws Exception {
protected List<MonitoringDoc> doCollect(final MonitoringDoc.Node node,
final long interval,
final ClusterState clusterState) throws Exception {
// fetch details about all jobs
try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, MONITORING_ORIGIN)) {
final GetJobsStatsAction.Response jobs =
@ -73,7 +77,7 @@ public class JobStatsCollector extends Collector {
.actionGet(getCollectionTimeout());

final long timestamp = timestamp();
final String clusterUuid = clusterUUID();
final String clusterUuid = clusterUuid(clusterState);

return jobs.getResponse().results().stream()
.map(jobStats -> new JobStatsMonitoringDoc(clusterUuid, timestamp, interval, node, jobStats))
@ -11,6 +11,7 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
@ -59,12 +60,14 @@ public class NodeStatsCollector extends Collector {

// For testing purpose
@Override
protected boolean shouldCollect() {
return super.shouldCollect();
protected boolean shouldCollect(final boolean isElectedMaster) {
return super.shouldCollect(isElectedMaster);
}

@Override
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node, final long interval) throws Exception {
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node,
final long interval,
final ClusterState clusterState) throws Exception {
NodesStatsRequest request = new NodesStatsRequest("_local");
request.indices(FLAGS);
request.os(true);
@ -81,10 +84,11 @@ public class NodeStatsCollector extends Collector {
throw response.failures().get(0);
}

final String clusterUuid = clusterUuid(clusterState);
final NodeStats nodeStats = response.getNodes().get(0);

return Collections.singletonList(new NodeStatsMonitoringDoc(clusterUUID(), nodeStats.getTimestamp(), interval, node,
node.getUUID(), isLocalNodeMaster(), nodeStats, BootstrapInfo.isMemoryLocked()));
return Collections.singletonList(new NodeStatsMonitoringDoc(clusterUuid, nodeStats.getTimestamp(), interval, node,
node.getUUID(), clusterState.getNodes().isLocalNodeElectedMaster(), nodeStats, BootstrapInfo.isMemoryLocked()));
}

}
@ -38,21 +38,21 @@ public class ShardsCollector extends Collector {
}

@Override
protected boolean shouldCollect() {
return super.shouldCollect() && isLocalNodeMaster();
protected boolean shouldCollect(final boolean isElectedMaster) {
return isElectedMaster && super.shouldCollect(isElectedMaster);
}

@Override
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node, final long interval) throws Exception {
protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node,
final long interval,
final ClusterState clusterState) throws Exception {
final List<MonitoringDoc> results = new ArrayList<>(1);

final ClusterState clusterState = clusterService.state();
if (clusterState != null) {
RoutingTable routingTable = clusterState.routingTable();
if (routingTable != null) {
List<ShardRouting> shards = routingTable.allShards();
if (shards != null) {
final String clusterUUID = clusterUUID();
final String clusterUuid = clusterUuid(clusterState);
final String stateUUID = clusterState.stateUUID();
final long timestamp = timestamp();

@ -66,7 +66,7 @@ public class ShardsCollector extends Collector {
// If the shard is assigned to a node, the shard monitoring document refers to this node
shardNode = convertNode(node.getTimestamp(), clusterState.getNodes().get(shard.currentNodeId()));
}
results.add(new ShardMonitoringDoc(clusterUUID, timestamp, interval, shardNode, shard, stateUUID));
results.add(new ShardMonitoringDoc(clusterUuid, timestamp, interval, shardNode, shard, stateUUID));
}
}
}
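The collector changes above all follow the same refactor: shouldCollect now receives the elected-master flag and doCollect receives the ClusterState, so the cluster UUID is read through the static clusterUuid(clusterState) helper instead of the old clusterUUID() instance method. A minimal sketch of a subclass written against the new contract (ExampleCollector is hypothetical, and its constructor wiring to the Collector super constructor is omitted):

    import java.util.Collection;
    import java.util.Collections;

    import org.elasticsearch.cluster.ClusterState;

    // Hypothetical subclass illustrating the signatures introduced by the diff above.
    public class ExampleCollector extends Collector {

        @Override
        protected boolean shouldCollect(final boolean isElectedMaster) {
            // Master-only collectors now gate on the flag passed in
            // instead of calling isLocalNodeMaster() themselves.
            return isElectedMaster && super.shouldCollect(isElectedMaster);
        }

        @Override
        protected Collection<MonitoringDoc> doCollect(final MonitoringDoc.Node node,
                                                      final long interval,
                                                      final ClusterState clusterState) throws Exception {
            // The cluster UUID comes from the ClusterState handed to doCollect.
            final String clusterUuid = clusterUuid(clusterState);
            // ... build MonitoringDoc instances tagged with clusterUuid ...
            return Collections.emptyList();
        }
    }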
@ -0,0 +1,117 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.ml.datafeed.DatafeedConfig;
import org.elasticsearch.xpack.security.authc.Authentication;
import org.elasticsearch.xpack.security.authc.AuthenticationService;
import org.junit.Before;

import java.util.Collections;
import java.util.Map;
import java.util.function.Consumer;

import static org.elasticsearch.xpack.ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME;
import static org.elasticsearch.xpack.ClientHelper.ML_ORIGIN;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MlClientHelperTests extends ESTestCase {

private Client client = mock(Client.class);

@Before
public void setupMocks() {
ThreadPool threadPool = mock(ThreadPool.class);
ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
when(threadPool.getThreadContext()).thenReturn(threadContext);
when(client.threadPool()).thenReturn(threadPool);

PlainActionFuture<SearchResponse> searchFuture = PlainActionFuture.newFuture();
searchFuture.onResponse(new SearchResponse());
when(client.search(any())).thenReturn(searchFuture);
}

public void testEmptyHeaders() {
DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed-foo", "foo");
builder.setIndices(Collections.singletonList("foo-index"));

assertExecutionWithOrigin(builder.build());
}

public void testWithHeaders() {
DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed-foo", "foo");
builder.setIndices(Collections.singletonList("foo-index"));
Map<String, String> headers = MapBuilder.<String, String>newMapBuilder()
.put(Authentication.AUTHENTICATION_KEY, "anything")
.put(AuthenticationService.RUN_AS_USER_HEADER, "anything")
.map();
builder.setHeaders(headers);

assertRunAsExecution(builder.build(), h -> {
assertThat(h.keySet(), hasSize(2));
assertThat(h, hasEntry(Authentication.AUTHENTICATION_KEY, "anything"));
assertThat(h, hasEntry(AuthenticationService.RUN_AS_USER_HEADER, "anything"));
});
}

public void testFilteredHeaders() {
DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed-foo", "foo");
builder.setIndices(Collections.singletonList("foo-index"));
Map<String, String> unrelatedHeaders = MapBuilder.<String, String>newMapBuilder()
.put(randomAlphaOfLength(10), "anything")
.map();
builder.setHeaders(unrelatedHeaders);

assertRunAsExecution(builder.build(), h -> assertThat(h.keySet(), hasSize(0)));
}

/**
* This method executes a search and checks if the thread context was enriched with the ml origin
*/
private void assertExecutionWithOrigin(DatafeedConfig datafeedConfig) {
MlClientHelper.execute(datafeedConfig, client, () -> {
Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME);
assertThat(origin, is(ML_ORIGIN));

// Check that headers are not set
Map<String, String> headers = client.threadPool().getThreadContext().getHeaders();
assertThat(headers, not(hasEntry(Authentication.AUTHENTICATION_KEY, "anything")));
assertThat(headers, not(hasEntry(AuthenticationService.RUN_AS_USER_HEADER, "anything")));

return client.search(new SearchRequest()).actionGet();
});
}

/**
* This method executes a search and ensures no stashed origin thread context was created, so that the regular node
* client was used, to emulate a run_as function
*/
public void assertRunAsExecution(DatafeedConfig datafeedConfig, Consumer<Map<String, String>> consumer) {
MlClientHelper.execute(datafeedConfig, client, () -> {
Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME);
assertThat(origin, is(nullValue()));

consumer.accept(client.threadPool().getThreadContext().getHeaders());
return client.search(new SearchRequest()).actionGet();
});
}
}
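The new MlClientHelperTests above pins down the behaviour of MlClientHelper.execute: when the datafeed carries no security headers the supplied action runs with the ML origin stashed in the thread context, and when headers are present they are replayed so the search effectively runs as the datafeed's owner. A small usage sketch under those assumptions (the datafeed id and index are placeholders, and client is assumed to be an injected Client):

    // Hypothetical call site mirroring what the tests above exercise.
    DatafeedConfig.Builder datafeed = new DatafeedConfig.Builder("datafeed-foo", "foo");
    datafeed.setIndices(Collections.singletonList("foo-index"));

    SearchResponse response = MlClientHelper.execute(datafeed.build(), client,
            () -> client.search(new SearchRequest("foo-index")).actionGet());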
@ -29,7 +29,6 @@ import org.elasticsearch.xpack.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.ml.job.config.JobTests;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;

import java.io.IOException;
import java.util.Collections;
import java.util.Date;
import java.util.Map;
@ -62,7 +61,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
}
job = new Job.Builder(job).setAnalysisConfig(analysisConfig).build();
builder.putJob(job, false);
builder.putDatafeed(datafeedConfig);
builder.putDatafeed(datafeedConfig, null);
} else {
builder.putJob(job, false);
}
@ -163,7 +162,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build();
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig1, null);

ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class,
() -> builder.deleteJob(job1.getId(), new PersistentTasksCustomMetaData(0L, Collections.emptyMap())));
@ -183,7 +182,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build();
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig1, null);

MlMetadata result = builder.build();
assertThat(result.getJobs().get("job_id"), sameInstance(job1));
@ -200,7 +199,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", "missing-job").build();
MlMetadata.Builder builder = new MlMetadata.Builder();

expectThrows(ResourceNotFoundException.class, () -> builder.putDatafeed(datafeedConfig1));
expectThrows(ResourceNotFoundException.class, () -> builder.putDatafeed(datafeedConfig1, null));
}

public void testPutDatafeed_failBecauseJobIsBeingDeleted() {
@ -209,7 +208,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);

expectThrows(ResourceNotFoundException.class, () -> builder.putDatafeed(datafeedConfig1));
expectThrows(ResourceNotFoundException.class, () -> builder.putDatafeed(datafeedConfig1, null));
}

public void testPutDatafeed_failBecauseDatafeedIdIsAlreadyTaken() {
@ -217,9 +216,9 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build();
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig1, null);

expectThrows(ResourceAlreadyExistsException.class, () -> builder.putDatafeed(datafeedConfig1));
expectThrows(ResourceAlreadyExistsException.class, () -> builder.putDatafeed(datafeedConfig1, null));
}

public void testPutDatafeed_failBecauseJobAlreadyHasDatafeed() {
@ -228,10 +227,10 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig2 = createDatafeedConfig("datafeed2", job1.getId()).build();
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig1, null);

ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class,
() -> builder.putDatafeed(datafeedConfig2));
() -> builder.putDatafeed(datafeedConfig2, null));
assertThat(e.status(), equalTo(RestStatus.CONFLICT));
}

@ -245,7 +244,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1.build(now), false);

expectThrows(ElasticsearchStatusException.class, () -> builder.putDatafeed(datafeedConfig1));
expectThrows(ElasticsearchStatusException.class, () -> builder.putDatafeed(datafeedConfig1, null));
}

public void testUpdateDatafeed() {
@ -253,12 +252,12 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build();
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig1, null);
MlMetadata beforeMetadata = builder.build();

DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig1.getId());
update.setScrollSize(5000);
MlMetadata updatedMetadata = new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null).build();
MlMetadata updatedMetadata = new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, null).build();

DatafeedConfig updatedDatafeed = updatedMetadata.getDatafeed(datafeedConfig1.getId());
assertThat(updatedDatafeed.getJobId(), equalTo(datafeedConfig1.getJobId()));
@ -270,7 +269,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
public void testUpdateDatafeed_failBecauseDatafeedDoesNotExist() {
DatafeedUpdate.Builder update = new DatafeedUpdate.Builder("job_id");
update.setScrollSize(5000);
expectThrows(ResourceNotFoundException.class, () -> new MlMetadata.Builder().updateDatafeed(update.build(), null).build());
expectThrows(ResourceNotFoundException.class, () -> new MlMetadata.Builder().updateDatafeed(update.build(), null, null).build());
}

public void testUpdateDatafeed_failBecauseDatafeedIsNotStopped() {
@ -278,7 +277,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build();
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig1, null);
MlMetadata beforeMetadata = builder.build();

PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
@ -290,7 +289,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
update.setScrollSize(5000);

ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class,
() -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), tasksInProgress));
() -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), tasksInProgress, null));
assertThat(e.status(), equalTo(RestStatus.CONFLICT));
}

@ -299,14 +298,14 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build();
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig1, null);
MlMetadata beforeMetadata = builder.build();

DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig1.getId());
update.setJobId(job1.getId() + "_2");

expectThrows(ResourceNotFoundException.class,
() -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null));
() -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, null));
}

public void testUpdateDatafeed_failBecauseNewJobHasAnotherDatafeedAttached() {
@ -318,15 +317,15 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putJob(job2.build(), false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig2);
builder.putDatafeed(datafeedConfig1, null);
builder.putDatafeed(datafeedConfig2, null);
MlMetadata beforeMetadata = builder.build();

DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig1.getId());
update.setJobId(job2.getId());

ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class,
() -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null));
() -> new MlMetadata.Builder(beforeMetadata).updateDatafeed(update.build(), null, null));
assertThat(e.status(), equalTo(RestStatus.CONFLICT));
assertThat(e.getMessage(), equalTo("A datafeed [datafeed2] already exists for job [job_id_2]"));
}
@ -336,7 +335,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
DatafeedConfig datafeedConfig1 = createDatafeedConfig("datafeed1", job1.getId()).build();
MlMetadata.Builder builder = new MlMetadata.Builder();
builder.putJob(job1, false);
builder.putDatafeed(datafeedConfig1);
builder.putDatafeed(datafeedConfig1, null);

MlMetadata result = builder.build();
assertThat(result.getJobs().get("job_id"), sameInstance(job1));
@ -377,9 +376,9 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {

public void testExpandDatafeedIds() {
MlMetadata.Builder mlMetadataBuilder = newMlMetadataWithJobs("bar-1", "foo-1", "foo-2");
mlMetadataBuilder.putDatafeed(createDatafeedConfig("bar-1-feed", "bar-1").build());
mlMetadataBuilder.putDatafeed(createDatafeedConfig("foo-1-feed", "foo-1").build());
mlMetadataBuilder.putDatafeed(createDatafeedConfig("foo-2-feed", "foo-2").build());
mlMetadataBuilder.putDatafeed(createDatafeedConfig("bar-1-feed", "bar-1").build(), null);
mlMetadataBuilder.putDatafeed(createDatafeedConfig("foo-1-feed", "foo-1").build(), null);
mlMetadataBuilder.putDatafeed(createDatafeedConfig("foo-2-feed", "foo-2").build(), null);
MlMetadata mlMetadata = mlMetadataBuilder.build();

@ -399,7 +398,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
}

@Override
protected MlMetadata mutateInstance(MlMetadata instance) throws IOException {
protected MlMetadata mutateInstance(MlMetadata instance) {
Map<String, Job> jobs = instance.getJobs();
Map<String, DatafeedConfig> datafeeds = instance.getDatafeeds();
MlMetadata.Builder metadataBuilder = new MlMetadata.Builder();
@ -408,7 +407,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
metadataBuilder.putJob(entry.getValue(), true);
}
for (Map.Entry<String, DatafeedConfig> entry : datafeeds.entrySet()) {
metadataBuilder.putDatafeed(entry.getValue());
metadataBuilder.putDatafeed(entry.getValue(), null);
}

switch (between(0, 1)) {
@ -429,7 +428,7 @@ public class MlMetadataTests extends AbstractSerializingTestCase<MlMetadata> {
}
randomJob = new Job.Builder(randomJob).setAnalysisConfig(analysisConfig).build();
metadataBuilder.putJob(randomJob, false);
metadataBuilder.putDatafeed(datafeedConfig);
metadataBuilder.putDatafeed(datafeedConfig, null);
break;
default:
throw new AssertionError("Illegal randomisation branch");
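The recurring change in these tests is the extra argument on MlMetadata.Builder#putDatafeed and #updateDatafeed. The tests pass null because they have no request context; the second parameter appears to carry the requesting user's headers (compare DatafeedConfig#setHeaders in MlClientHelperTests above), though that is an inference from the test code rather than something this diff states. A sketch of the new call shape, recombined from the tests, with job and datafeedConfig standing in for the fixtures used there:

    // Shapes exercised by the tests above; null stands in for the caller's security headers.
    MlMetadata.Builder builder = new MlMetadata.Builder();
    builder.putJob(job, false);
    builder.putDatafeed(datafeedConfig, null);

    DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeedConfig.getId());
    update.setScrollSize(5000);
    MlMetadata updated = new MlMetadata.Builder(builder.build())
            .updateDatafeed(update.build(), null, null)   // (update, persistent tasks, headers)
            .build();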
@ -80,7 +80,7 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCa
MlMetadata.Builder mlBuilder = new MlMetadata.Builder();
mlBuilder.putJob(BaseMlIntegTestCase.createScheduledJob("job_id").build(new Date()), false);
mlBuilder.putDatafeed(BaseMlIntegTestCase.createDatafeed("datafeed_id", "job_id",
Collections.singletonList("*")));
Collections.singletonList("*")), null);
final PersistentTasksCustomMetaData.Builder startDataFeedTaskBuilder = PersistentTasksCustomMetaData.builder();
addJobTask("job_id", null, JobState.OPENED, startDataFeedTaskBuilder);
addTask("datafeed_id", 0L, null, DatafeedState.STARTED, startDataFeedTaskBuilder);
@ -147,7 +147,7 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCa
request.setForce(true);
CloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs);
assertEquals(Arrays.asList("job_id_1", "job_id_2", "job_id_3"), openJobs);
assertEquals(Arrays.asList("job_id_4"), closingJobs);
assertEquals(Collections.singletonList("job_id_4"), closingJobs);

request.setForce(false);
expectThrows(ElasticsearchStatusException.class,
@ -171,7 +171,7 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCa

CloseJobAction.Request request = new CloseJobAction.Request("job_id_1");
CloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs);
assertEquals(Arrays.asList("job_id_1"), openJobs);
assertEquals(Collections.singletonList("job_id_1"), openJobs);
assertEquals(Collections.emptyList(), closingJobs);

// Job without task is closed
@ -219,7 +219,7 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCa
request.setForce(true);

CloseJobAction.resolveAndValidateJobId(request, cs1, openJobs, closingJobs);
assertEquals(Arrays.asList("job_id_failed"), openJobs);
assertEquals(Collections.singletonList("job_id_failed"), openJobs);
assertEquals(Collections.emptyList(), closingJobs);

openJobs.clear();
@ -252,7 +252,7 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCa

CloseJobAction.resolveAndValidateJobId(new CloseJobAction.Request("_all"), cs1, openJobs, closingJobs);
assertEquals(Arrays.asList("job_id_open-1", "job_id_open-2"), openJobs);
assertEquals(Arrays.asList("job_id_closing"), closingJobs);
assertEquals(Collections.singletonList("job_id_closing"), closingJobs);
openJobs.clear();
closingJobs.clear();

@ -264,12 +264,12 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCa

CloseJobAction.resolveAndValidateJobId(new CloseJobAction.Request("job_id_closing"), cs1, openJobs, closingJobs);
assertEquals(Collections.emptyList(), openJobs);
assertEquals(Arrays.asList("job_id_closing"), closingJobs);
assertEquals(Collections.singletonList("job_id_closing"), closingJobs);
openJobs.clear();
closingJobs.clear();

CloseJobAction.resolveAndValidateJobId(new CloseJobAction.Request("job_id_open-1"), cs1, openJobs, closingJobs);
assertEquals(Arrays.asList("job_id_open-1"), openJobs);
assertEquals(Collections.singletonList("job_id_open-1"), openJobs);
assertEquals(Collections.emptyList(), closingJobs);
openJobs.clear();
closingJobs.clear();
@ -316,8 +316,8 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCa
}

public void testBuildWaitForCloseRequest() {
List<String> openJobIds = Arrays.asList(new String[] {"openjob1", "openjob2"});
List<String> closingJobIds = Arrays.asList(new String[] {"closingjob1"});
List<String> openJobIds = Arrays.asList("openjob1", "openjob2");
List<String> closingJobIds = Collections.singletonList("closingjob1");

PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
addJobTask("openjob1", null, JobState.OPENED, tasksBuilder);
@ -343,4 +343,4 @@ public class CloseJobActionRequestTests extends AbstractStreamableXContentTestCa
tasks.updateTaskStatus(MlMetadata.datafeedTaskId(datafeedId), state);
}

}
}
@ -0,0 +1,25 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.test.AbstractStreamableTestCase;

public class GetCalendarsActionRequestTests extends AbstractStreamableTestCase<GetCalendarsAction.Request> {

@Override
protected GetCalendarsAction.Request createTestInstance() {
GetCalendarsAction.Request request = new GetCalendarsAction.Request();
request.setCalendarId(randomAlphaOfLengthBetween(1, 20));
return request;
}

@Override
protected GetCalendarsAction.Request createBlankInstance() {
return new GetCalendarsAction.Request();
}

}
@ -0,0 +1,44 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.action;

import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;
import org.elasticsearch.xpack.ml.calendars.Calendar;

import java.util.ArrayList;
import java.util.List;

public class PutCalendarActionRequestTests extends AbstractStreamableXContentTestCase<PutCalendarAction.Request> {

private final String calendarId = randomAlphaOfLengthBetween(1, 20);

@Override
protected PutCalendarAction.Request createTestInstance() {
int size = randomInt(10);
List<String> jobIds = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
jobIds.add(randomAlphaOfLengthBetween(1, 20));
}
Calendar calendar = new Calendar(calendarId, jobIds);
return new PutCalendarAction.Request(calendar);
}

@Override
protected boolean supportsUnknownFields() {
return false;
}

@Override
protected PutCalendarAction.Request createBlankInstance() {
return new PutCalendarAction.Request();
}

@Override
protected PutCalendarAction.Request doParseInstance(XContentParser parser) {
return PutCalendarAction.Request.parseRequest(calendarId, parser);
}
}
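The two new request test classes pin down the calendar request surface added in this commit: a Calendar is a named list of job ids, PutCalendarAction.Request wraps one (and can be parsed from XContent via parseRequest), and GetCalendarsAction.Request is addressed by calendar id. A short sketch of building the requests, with placeholder calendar and job ids:

    // Building calendar requests the same way the tests above do.
    Calendar calendar = new Calendar("weekly-maintenance", Arrays.asList("job-1", "job-2"));
    PutCalendarAction.Request putRequest = new PutCalendarAction.Request(calendar);

    GetCalendarsAction.Request getRequest = new GetCalendarsAction.Request();
    getRequest.setCalendarId("weekly-maintenance");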
@ -43,7 +43,7 @@ public class StartDatafeedActionTests extends ESTestCase {
PersistentTasksCustomMetaData tasks = PersistentTasksCustomMetaData.builder().build();
DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build();
MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1)
.putDatafeed(datafeedConfig1)
.putDatafeed(datafeedConfig1, null)
.build();
Exception e = expectThrows(ElasticsearchStatusException.class,
() -> StartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks));
@ -60,7 +60,7 @@ public class StartDatafeedActionTests extends ESTestCase {
PersistentTasksCustomMetaData tasks = tasksBuilder.build();
DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build();
MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1)
.putDatafeed(datafeedConfig1)
.putDatafeed(datafeedConfig1, null)
.build();

StartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks);
@ -76,7 +76,7 @@ public class StartDatafeedActionTests extends ESTestCase {
PersistentTasksCustomMetaData tasks = tasksBuilder.build();
DatafeedConfig datafeedConfig1 = DatafeedManagerTests.createDatafeedConfig("foo-datafeed", "job_id").build();
MlMetadata mlMetadata2 = new MlMetadata.Builder(mlMetadata1)
.putDatafeed(datafeedConfig1)
.putDatafeed(datafeedConfig1, null)
.build();

StartDatafeedAction.validate("foo-datafeed", mlMetadata2, tasks);
@ -20,7 +20,6 @@ import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.xpack.persistent.PersistentTasksCustomMetaData.Assignment;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.List;
@ -66,7 +65,7 @@ public class StopDatafeedActionRequestTests extends AbstractStreamableXContentTe
tasksBuilder.addTask(MlMetadata.datafeedTaskId("foo"), StartDatafeedAction.TASK_NAME,
new StartDatafeedAction.DatafeedParams("foo", 0L), new Assignment("node_id", ""));
tasksBuilder.updateTaskStatus(MlMetadata.datafeedTaskId("foo"), DatafeedState.STARTED);
PersistentTasksCustomMetaData tasks = tasksBuilder.build();
tasksBuilder.build();

Job job = createDatafeedJob().build(new Date());
MlMetadata mlMetadata1 = new MlMetadata.Builder().putJob(job, false).build();
@ -76,7 +75,7 @@ public class StopDatafeedActionRequestTests extends AbstractStreamableXContentTe

DatafeedConfig datafeedConfig = createDatafeedConfig("foo", "job_id").build();
MlMetadata mlMetadata2 = new MlMetadata.Builder().putJob(job, false)
.putDatafeed(datafeedConfig)
.putDatafeed(datafeedConfig, null)
.build();
StopDatafeedAction.validateDatafeedTask("foo", mlMetadata2);
}
@ -88,12 +87,12 @@ public class StopDatafeedActionRequestTests extends AbstractStreamableXContentTe
addTask("datafeed_1", 0L, "node-1", DatafeedState.STARTED, tasksBuilder);
Job job = BaseMlIntegTestCase.createScheduledJob("job_id_1").build(new Date());
DatafeedConfig datafeedConfig = createDatafeedConfig("datafeed_1", "job_id_1").build();
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig);
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null);

addTask("datafeed_2", 0L, "node-1", DatafeedState.STOPPED, tasksBuilder);
job = BaseMlIntegTestCase.createScheduledJob("job_id_2").build(new Date());
datafeedConfig = createDatafeedConfig("datafeed_2", "job_id_2").build();
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig);
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null);

PersistentTasksCustomMetaData tasks = tasksBuilder.build();
MlMetadata mlMetadata = mlMetadataBuilder.build();
@ -102,7 +101,7 @@ public class StopDatafeedActionRequestTests extends AbstractStreamableXContentTe
List<String> stoppingDatafeeds = new ArrayList<>();
StopDatafeedAction.resolveDataFeedIds(new StopDatafeedAction.Request("datafeed_1"), mlMetadata, tasks, startedDatafeeds,
stoppingDatafeeds);
assertEquals(Arrays.asList("datafeed_1"), startedDatafeeds);
assertEquals(Collections.singletonList("datafeed_1"), startedDatafeeds);
assertEquals(Collections.emptyList(), stoppingDatafeeds);

startedDatafeeds.clear();
@ -120,17 +119,17 @@ public class StopDatafeedActionRequestTests extends AbstractStreamableXContentTe
addTask("datafeed_1", 0L, "node-1", DatafeedState.STARTED, tasksBuilder);
Job job = BaseMlIntegTestCase.createScheduledJob("job_id_1").build(new Date());
DatafeedConfig datafeedConfig = createDatafeedConfig("datafeed_1", "job_id_1").build();
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig);
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null);

addTask("datafeed_2", 0L, "node-1", DatafeedState.STOPPED, tasksBuilder);
job = BaseMlIntegTestCase.createScheduledJob("job_id_2").build(new Date());
datafeedConfig = createDatafeedConfig("datafeed_2", "job_id_2").build();
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig);
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null);

addTask("datafeed_3", 0L, "node-1", DatafeedState.STOPPING, tasksBuilder);
job = BaseMlIntegTestCase.createScheduledJob("job_id_3").build(new Date());
datafeedConfig = createDatafeedConfig("datafeed_3", "job_id_3").build();
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig);
mlMetadataBuilder.putJob(job, false).putDatafeed(datafeedConfig, null);

PersistentTasksCustomMetaData tasks = tasksBuilder.build();
MlMetadata mlMetadata = mlMetadataBuilder.build();
@ -139,8 +138,8 @@ public class StopDatafeedActionRequestTests extends AbstractStreamableXContentTe
List<String> stoppingDatafeeds = new ArrayList<>();
StopDatafeedAction.resolveDataFeedIds(new StopDatafeedAction.Request("_all"), mlMetadata, tasks, startedDatafeeds,
stoppingDatafeeds);
assertEquals(Arrays.asList("datafeed_1"), startedDatafeeds);
assertEquals(Arrays.asList("datafeed_3"), stoppingDatafeeds);
assertEquals(Collections.singletonList("datafeed_1"), startedDatafeeds);
assertEquals(Collections.singletonList("datafeed_3"), stoppingDatafeeds);

startedDatafeeds.clear();
stoppingDatafeeds.clear();
@ -0,0 +1,49 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.calendars;

import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;

public class CalendarTests extends AbstractSerializingTestCase<Calendar> {

@Override
protected Calendar createTestInstance() {
int size = randomInt(10);
List<String> items = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
items.add(randomAlphaOfLengthBetween(1, 20));
}
return new Calendar(randomAlphaOfLengthBetween(1, 20), items);
}

@Override
protected Writeable.Reader<Calendar> instanceReader() {
return Calendar::new;
}

@Override
protected Calendar doParseInstance(XContentParser parser) throws IOException {
return Calendar.PARSER.apply(parser, null).build();
}

public void testNullId() {
NullPointerException ex = expectThrows(NullPointerException.class, () -> new Calendar(null, Collections.emptyList()));
assertEquals(Calendar.ID.getPreferredName() + " must not be null", ex.getMessage());
}

public void testDocumentId() {
assertThat(Calendar.documentId("foo"), equalTo("calendar_foo"));
}
}
@ -6,6 +6,7 @@
package org.elasticsearch.xpack.ml.calendars;

import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.xpack.ml.job.config.Connective;
@ -25,8 +26,7 @@ import java.util.List;

public class SpecialEventTests extends AbstractSerializingTestCase<SpecialEvent> {

@Override
protected SpecialEvent createTestInstance() {
public static SpecialEvent createSpecialEvent() {
int size = randomInt(10);
List<String> jobIds = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
@ -39,6 +39,11 @@ public class SpecialEventTests extends AbstractSerializingTestCase<SpecialEvent>
jobIds);
}

@Override
protected SpecialEvent createTestInstance() {
return createSpecialEvent();
}

@Override
protected Writeable.Reader<SpecialEvent> instanceReader() {
return SpecialEvent::new;
@ -50,8 +55,9 @@ public class SpecialEventTests extends AbstractSerializingTestCase<SpecialEvent>
}

public void testToDetectionRule() {
long bucketSpanSecs = 300;
SpecialEvent event = createTestInstance();
DetectionRule rule = event.toDetectionRule();
DetectionRule rule = event.toDetectionRule(TimeValue.timeValueSeconds(bucketSpanSecs));

assertEquals(Connective.AND, rule.getConditionsConnective());
assertEquals(RuleAction.SKIP_SAMPLING_AND_FILTER_RESULTS, rule.getRuleAction());
@ -61,10 +67,18 @@ public class SpecialEventTests extends AbstractSerializingTestCase<SpecialEvent>
List<RuleCondition> conditions = rule.getRuleConditions();
assertEquals(2, conditions.size());
assertEquals(RuleConditionType.TIME, conditions.get(0).getConditionType());
assertEquals(Operator.GTE, conditions.get(0).getCondition().getOperator());
assertEquals(event.getStartTime().toEpochSecond(), Long.parseLong(conditions.get(0).getCondition().getValue()));
assertEquals(RuleConditionType.TIME, conditions.get(1).getConditionType());
assertEquals(Operator.GTE, conditions.get(0).getCondition().getOperator());
assertEquals(Operator.LT, conditions.get(1).getCondition().getOperator());
assertEquals(event.getEndTime().toEpochSecond(), Long.parseLong(conditions.get(1).getCondition().getValue()));

// Check times are aligned with the bucket
long conditionStartTime = Long.parseLong(conditions.get(0).getCondition().getValue());
assertEquals(0, conditionStartTime % bucketSpanSecs);
long bucketCount = conditionStartTime / bucketSpanSecs;
assertEquals(bucketSpanSecs * bucketCount, conditionStartTime);

long conditionEndTime = Long.parseLong(conditions.get(1).getCondition().getValue());
assertEquals(0, conditionEndTime % bucketSpanSecs);
assertEquals(bucketSpanSecs * (bucketCount + 1), conditionEndTime);
}
}
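The reworked testToDetectionRule documents the new behaviour of SpecialEvent#toDetectionRule: it now takes the job's bucket span, and the generated time conditions are snapped to bucket boundaries, with the end condition in this fixture landing on the boundary after the start bucket. The arithmetic the assertions check is just a floor to a multiple of the bucket span; a small illustrative sketch (the helper name is hypothetical, not part of the diff):

    // Floor an epoch-seconds timestamp to the start of its bucket, as the assertions above expect.
    static long alignToBucketStart(long epochSeconds, long bucketSpanSecs) {
        return (epochSeconds / bucketSpanSecs) * bucketSpanSecs;   // integer division floors
    }

    // With a 300s bucket span, an event starting at 1,000s is aligned to 900s,
    // and the end condition checked by the test lands on the next boundary, 1,200s.
    long start = alignToBucketStart(1_000L, 300L);   // 900
    long end = start + 300L;                         // 1200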
@ -16,8 +16,10 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
@ -80,7 +82,7 @@ public class DatafeedManagerTests extends ESTestCase {
Job job = createDatafeedJob().build(new Date());
mlMetadata.putJob(job, false);
DatafeedConfig datafeed = createDatafeedConfig("datafeed_id", job.getId()).build();
mlMetadata.putDatafeed(datafeed);
mlMetadata.putDatafeed(datafeed, null);
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder);
PersistentTasksCustomMetaData tasks = tasksBuilder.build();
@ -109,6 +111,7 @@ public class DatafeedManagerTests extends ESTestCase {

auditor = mock(Auditor.class);
threadPool = mock(ThreadPool.class);
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
ExecutorService executorService = mock(ExecutorService.class);
doAnswer(invocation -> {
((Runnable) invocation.getArguments()[0]).run();
@ -248,7 +251,7 @@ public class DatafeedManagerTests extends ESTestCase {
}
}

public void testDatafeedTaskWaitsUntilJobIsOpened() throws Exception {
public void testDatafeedTaskWaitsUntilJobIsOpened() {
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
ClusterState.Builder cs = ClusterState.builder(clusterService.state())
@ -288,7 +291,7 @@ public class DatafeedManagerTests extends ESTestCase {
verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME);
}

public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() throws Exception {
public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() {
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
ClusterState.Builder cs = ClusterState.builder(clusterService.state())
@ -316,7 +319,7 @@ public class DatafeedManagerTests extends ESTestCase {
verify(task).stop("job_never_opened", TimeValue.timeValueSeconds(20));
}

public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() throws Exception {
public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() {
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
addJobTask("job_id", "node_id", JobState.OPENING, tasksBuilder);
ClusterState.Builder cs = ClusterState.builder(clusterService.state())
@ -63,11 +63,11 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
.build();
|
||||
}
|
||||
|
||||
public void testSelectNode_GivenJobIsOpened() throws Exception {
|
||||
public void testSelectNode_GivenJobIsOpened() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
|
||||
@ -81,11 +81,11 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated();
|
||||
}
|
||||
|
||||
public void testSelectNode_GivenJobIsOpening() throws Exception {
|
||||
public void testSelectNode_GivenJobIsOpening() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
|
||||
@ -99,13 +99,13 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated();
|
||||
}
|
||||
|
||||
public void testNoJobTask() throws Exception {
|
||||
public void testNoJobTask() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
|
||||
// Using wildcard index name to test for index resolving as well
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
tasks = PersistentTasksCustomMetaData.builder().build();
|
||||
@ -123,11 +123,11 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
+ "[cannot start datafeed [datafeed_id], because job's [job_id] state is [closed] while state [opened] is required]"));
|
||||
}
|
||||
|
||||
public void testSelectNode_GivenJobFailedOrClosed() throws Exception {
|
||||
public void testSelectNode_GivenJobFailedOrClosed() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
|
||||
@ -149,13 +149,13 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
+ "] while state [opened] is required]"));
|
||||
}
|
||||
|
||||
public void testShardUnassigned() throws Exception {
|
||||
public void testShardUnassigned() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
|
||||
// Using wildcard index name to test for index resolving as well
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
|
||||
@ -175,13 +175,13 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated();
|
||||
}
|
||||
|
||||
public void testShardNotAllActive() throws Exception {
|
||||
public void testShardNotAllActive() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
|
||||
// Using wildcard index name to test for index resolving as well
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
|
||||
@ -202,11 +202,11 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated();
|
||||
}
|
||||
|
||||
public void testIndexDoesntExist() throws Exception {
|
||||
public void testIndexDoesntExist() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
|
||||
@ -226,11 +226,28 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
+ "[cannot start datafeed [datafeed_id] because index [not_foo] does not exist, is closed, or is still initializing.]"));
|
||||
}
|
||||
|
||||
public void testRemoteIndex() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("remote:foo")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
|
||||
addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder);
|
||||
tasks = tasksBuilder.build();
|
||||
|
||||
givenClusterState("foo", 1, 0);
|
||||
|
||||
PersistentTasksCustomMetaData.Assignment result = new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").selectNode();
|
||||
assertNotNull(result.getExecutorNode());
|
||||
}
|
||||
|
||||
public void testSelectNode_jobTaskStale() {
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
String nodeId = randomBoolean() ? "node_id2" : null;
|
||||
@ -261,14 +278,14 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
new DatafeedNodeSelector(clusterState, resolver, "datafeed_id").checkDatafeedTaskCanBeCreated();
|
||||
}
|
||||
|
||||
public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() throws Exception {
|
||||
public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() {
|
||||
// Here we test that when there are 2 problems, the most critical gets reported first.
|
||||
// In this case job is Opening (non-critical) and the index does not exist (critical)
|
||||
|
||||
MlMetadata.Builder mlMetadataBuilder = new MlMetadata.Builder();
|
||||
Job job = createScheduledJob("job_id").build(new Date());
|
||||
mlMetadataBuilder.putJob(job, false);
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")));
|
||||
mlMetadataBuilder.putDatafeed(createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")), null);
|
||||
mlMetadata = mlMetadataBuilder.build();
|
||||
|
||||
PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder();
|
||||
@ -339,4 +356,4 @@ public class DatafeedNodeSelectorTests extends ESTestCase {
|
||||
|
||||
return new RoutingTable.Builder().add(rtBuilder).build();
|
||||
}
|
||||
}
|
||||
}
|
||||
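// Editor's note (hedged): throughout these test diffs, MlMetadata.Builder#putDatafeed and
// DatafeedUpdate#apply gain a second argument, passed here as null or Collections.emptyMap().
// It is presumably a map of security headers captured from the request that created or
// updated the datafeed, so later searches can run with the original caller's permissions;
// these unit tests pass an empty value because security is not exercised. A minimal sketch
// of the new call shapes, mirroring the lines above and below:
Map<String, String> noHeaders = Collections.emptyMap();
mlMetadataBuilder.putDatafeed(
        createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")), noHeaders);
DatafeedConfig updatedDatafeed = new DatafeedUpdate.Builder(datafeed.getId()).build().apply(datafeed, noHeaders);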
|
@ -25,9 +25,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField;
|
||||
import org.elasticsearch.test.AbstractSerializingTestCase;
|
||||
import org.elasticsearch.xpack.ml.datafeed.ChunkingConfig.Mode;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
@ -93,7 +91,7 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
|
||||
}
|
||||
|
||||
@Override
|
||||
protected DatafeedUpdate doParseInstance(XContentParser parser) throws IOException {
|
||||
protected DatafeedUpdate doParseInstance(XContentParser parser) {
|
||||
return DatafeedUpdate.PARSER.apply(parser, null).build();
|
||||
}
|
||||
|
||||
@ -111,12 +109,12 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
|
||||
|
||||
public void testApply_failBecauseTargetDatafeedHasDifferentId() {
|
||||
DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo");
|
||||
expectThrows(IllegalArgumentException.class, () -> createRandomized(datafeed.getId() + "_2").apply(datafeed));
|
||||
expectThrows(IllegalArgumentException.class, () -> createRandomized(datafeed.getId() + "_2").apply(datafeed, null));
|
||||
}
|
||||
|
||||
public void testApply_givenEmptyUpdate() {
|
||||
DatafeedConfig datafeed = DatafeedConfigTests.createRandomizedDatafeedConfig("foo");
|
||||
DatafeedConfig updatedDatafeed = new DatafeedUpdate.Builder(datafeed.getId()).build().apply(datafeed);
|
||||
DatafeedConfig updatedDatafeed = new DatafeedUpdate.Builder(datafeed.getId()).build().apply(datafeed, null);
|
||||
assertThat(datafeed, equalTo(updatedDatafeed));
|
||||
}
|
||||
|
||||
@ -127,7 +125,7 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
|
||||
|
||||
DatafeedUpdate.Builder updated = new DatafeedUpdate.Builder(datafeed.getId());
|
||||
updated.setScrollSize(datafeed.getScrollSize() + 1);
|
||||
DatafeedConfig updatedDatafeed = update.build().apply(datafeed);
|
||||
DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null);
|
||||
|
||||
DatafeedConfig.Builder expectedDatafeed = new DatafeedConfig.Builder(datafeed);
|
||||
expectedDatafeed.setScrollSize(datafeed.getScrollSize() + 1);
|
||||
@ -136,40 +134,40 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
|
||||
|
||||
public void testApply_givenFullUpdateNoAggregations() {
|
||||
DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("foo", "foo-feed");
|
||||
datafeedBuilder.setIndices(Arrays.asList("i_1"));
|
||||
datafeedBuilder.setTypes(Arrays.asList("t_1"));
|
||||
datafeedBuilder.setIndices(Collections.singletonList("i_1"));
|
||||
datafeedBuilder.setTypes(Collections.singletonList("t_1"));
|
||||
DatafeedConfig datafeed = datafeedBuilder.build();
|
||||
|
||||
DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId());
|
||||
update.setJobId("bar");
|
||||
update.setIndices(Arrays.asList("i_2"));
|
||||
update.setTypes(Arrays.asList("t_2"));
|
||||
update.setIndices(Collections.singletonList("i_2"));
|
||||
update.setTypes(Collections.singletonList("t_2"));
|
||||
update.setQueryDelay(TimeValue.timeValueSeconds(42));
|
||||
update.setFrequency(TimeValue.timeValueSeconds(142));
|
||||
update.setQuery(QueryBuilders.termQuery("a", "b"));
|
||||
update.setScriptFields(Arrays.asList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false)));
|
||||
update.setScriptFields(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false)));
|
||||
update.setScrollSize(8000);
|
||||
update.setChunkingConfig(ChunkingConfig.newManual(TimeValue.timeValueHours(1)));
|
||||
|
||||
DatafeedConfig updatedDatafeed = update.build().apply(datafeed);
|
||||
DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null);
|
||||
|
||||
assertThat(updatedDatafeed.getJobId(), equalTo("bar"));
|
||||
assertThat(updatedDatafeed.getIndices(), equalTo(Arrays.asList("i_2")));
|
||||
assertThat(updatedDatafeed.getTypes(), equalTo(Arrays.asList("t_2")));
|
||||
assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_2")));
|
||||
assertThat(updatedDatafeed.getTypes(), equalTo(Collections.singletonList("t_2")));
|
||||
assertThat(updatedDatafeed.getQueryDelay(), equalTo(TimeValue.timeValueSeconds(42)));
|
||||
assertThat(updatedDatafeed.getFrequency(), equalTo(TimeValue.timeValueSeconds(142)));
|
||||
assertThat(updatedDatafeed.getQuery(), equalTo(QueryBuilders.termQuery("a", "b")));
|
||||
assertThat(updatedDatafeed.hasAggregations(), is(false));
|
||||
assertThat(updatedDatafeed.getScriptFields(),
|
||||
equalTo(Arrays.asList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false))));
|
||||
equalTo(Collections.singletonList(new SearchSourceBuilder.ScriptField("a", mockScript("b"), false))));
|
||||
assertThat(updatedDatafeed.getScrollSize(), equalTo(8000));
|
||||
assertThat(updatedDatafeed.getChunkingConfig(), equalTo(ChunkingConfig.newManual(TimeValue.timeValueHours(1))));
|
||||
}
|
||||
|
||||
public void testApply_givenAggregations() {
|
||||
DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder("foo", "foo-feed");
|
||||
datafeedBuilder.setIndices(Arrays.asList("i_1"));
|
||||
datafeedBuilder.setTypes(Arrays.asList("t_1"));
|
||||
datafeedBuilder.setIndices(Collections.singletonList("i_1"));
|
||||
datafeedBuilder.setTypes(Collections.singletonList("t_1"));
|
||||
DatafeedConfig datafeed = datafeedBuilder.build();
|
||||
|
||||
DatafeedUpdate.Builder update = new DatafeedUpdate.Builder(datafeed.getId());
|
||||
@ -177,17 +175,17 @@ public class DatafeedUpdateTests extends AbstractSerializingTestCase<DatafeedUpd
|
||||
update.setAggregations(new AggregatorFactories.Builder().addAggregator(
|
||||
AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime)));
|
||||
|
||||
DatafeedConfig updatedDatafeed = update.build().apply(datafeed);
|
||||
DatafeedConfig updatedDatafeed = update.build().apply(datafeed, null);
|
||||
|
||||
assertThat(updatedDatafeed.getIndices(), equalTo(Arrays.asList("i_1")));
|
||||
assertThat(updatedDatafeed.getTypes(), equalTo(Arrays.asList("t_1")));
|
||||
assertThat(updatedDatafeed.getIndices(), equalTo(Collections.singletonList("i_1")));
|
||||
assertThat(updatedDatafeed.getTypes(), equalTo(Collections.singletonList("t_1")));
|
||||
assertThat(updatedDatafeed.getAggregations(),
|
||||
equalTo(new AggregatorFactories.Builder().addAggregator(
|
||||
AggregationBuilders.histogram("a").interval(300000).field("time").subAggregation(maxTime))));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) throws IOException {
|
||||
protected DatafeedUpdate mutateInstance(DatafeedUpdate instance) {
|
||||
DatafeedUpdate.Builder builder = new DatafeedUpdate.Builder(instance);
|
||||
switch (between(0, 10)) {
|
||||
case 0:
|
||||
|
@ -186,7 +186,7 @@ public class AggregationDataExtractorTests extends ESTestCase {
|
||||
assertThat(capturedSearchRequests.size(), equalTo(1));
|
||||
}
|
||||
|
||||
public void testExtractionGivenResponseHasMultipleTopLevelAggs() throws IOException {
|
||||
public void testExtractionGivenResponseHasMultipleTopLevelAggs() {
|
||||
TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
|
||||
|
||||
Histogram histogram1 = mock(Histogram.class);
|
||||
@ -203,7 +203,7 @@ public class AggregationDataExtractorTests extends ESTestCase {
|
||||
assertThat(e.getMessage(), containsString("Multiple top level aggregations not supported; found: [hist_1, hist_2]"));
|
||||
}
|
||||
|
||||
public void testExtractionGivenCancelBeforeNext() throws IOException {
|
||||
public void testExtractionGivenCancelBeforeNext() {
|
||||
TestDataExtractor extractor = new TestDataExtractor(1000L, 4000L);
|
||||
SearchResponse response = createSearchResponse("time", Collections.emptyList());
|
||||
extractor.setNextResponse(response);
|
||||
@ -249,7 +249,7 @@ public class AggregationDataExtractorTests extends ESTestCase {
|
||||
expectThrows(IOException.class, extractor::next);
|
||||
}
|
||||
|
||||
public void testExtractionGivenSearchResponseHasShardFailures() throws IOException {
|
||||
public void testExtractionGivenSearchResponseHasShardFailures() {
|
||||
TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
|
||||
extractor.setNextResponse(createResponseWithShardFailures());
|
||||
|
||||
@ -257,7 +257,7 @@ public class AggregationDataExtractorTests extends ESTestCase {
|
||||
IOException e = expectThrows(IOException.class, extractor::next);
|
||||
}
|
||||
|
||||
public void testExtractionGivenInitSearchResponseEncounteredUnavailableShards() throws IOException {
|
||||
public void testExtractionGivenInitSearchResponseEncounteredUnavailableShards() {
|
||||
TestDataExtractor extractor = new TestDataExtractor(1000L, 2000L);
|
||||
extractor.setNextResponse(createResponseWithUnavailableShards(2));
|
||||
|
||||
@ -267,7 +267,8 @@ public class AggregationDataExtractorTests extends ESTestCase {
|
||||
}
|
||||
|
||||
private AggregationDataExtractorContext createContext(long start, long end) {
|
||||
return new AggregationDataExtractorContext(jobId, timeField, fields, indices, types, query, aggs, start, end, true);
|
||||
return new AggregationDataExtractorContext(jobId, timeField, fields, indices, types, query, aggs, start, end, true,
|
||||
Collections.emptyMap());
|
||||
}
|
||||
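// Editor's note (hedged): AggregationDataExtractorContext, ChunkedDataExtractorContext and
// ScrollDataExtractorContext each gain a trailing Map argument in this change, passed as
// Collections.emptyMap() in these unit tests. The map presumably carries the datafeed's
// stored request headers so that the extractor's searches run with the permissions of the
// user who configured the datafeed. A sketch of supplying non-empty headers, reusing the
// test's existing fields (the header name is a made-up placeholder):
Map<String, String> headers = Collections.singletonMap("some-security-header", "some-value");
AggregationDataExtractorContext contextWithHeaders = new AggregationDataExtractorContext(
        jobId, timeField, fields, indices, types, query, aggs, 1000L, 2000L, true, headers);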
|
||||
@SuppressWarnings("unchecked")
|
||||
|
@ -321,7 +321,7 @@ public class ChunkedDataExtractorTests extends ESTestCase {
|
||||
assertThat(searchRequest, containsString("\"from\":200000,\"to\":400000"));
|
||||
}
|
||||
|
||||
public void testCancelGivenNextWasNeverCalled() throws IOException {
|
||||
public void testCancelGivenNextWasNeverCalled() {
|
||||
chunkSpan = TimeValue.timeValueSeconds(1);
|
||||
TestDataExtractor extractor = new TestDataExtractor(1000L, 2300L);
|
||||
extractor.setNextResponse(createSearchResponse(10L, 1000L, 2200L));
|
||||
@ -446,7 +446,7 @@ public class ChunkedDataExtractorTests extends ESTestCase {
|
||||
|
||||
private ChunkedDataExtractorContext createContext(long start, long end) {
|
||||
return new ChunkedDataExtractorContext(jobId, timeField, indices, types, query, scrollSize, start, end, chunkSpan,
|
||||
ChunkedDataExtractorFactory.newIdentityTimeAligner());
|
||||
ChunkedDataExtractorFactory.newIdentityTimeAligner(), Collections.emptyMap());
|
||||
}
|
||||
|
||||
private static class StubSubExtractor implements DataExtractor {
|
||||
@ -465,7 +465,7 @@ public class ChunkedDataExtractorTests extends ESTestCase {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Optional<InputStream> next() throws IOException {
|
||||
public Optional<InputStream> next() {
|
||||
if (streams.isEmpty()) {
|
||||
hasNext = false;
|
||||
return Optional.empty();
|
||||
|
@ -15,6 +15,8 @@ import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.ShardSearchFailure;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.document.DocumentField;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
@ -24,6 +26,7 @@ import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.SearchHits;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.junit.Before;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
|
||||
@ -117,7 +120,10 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
|
||||
@Before
|
||||
public void setUpTests() {
|
||||
ThreadPool threadPool = mock(ThreadPool.class);
|
||||
when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY));
|
||||
client = mock(Client.class);
|
||||
when(client.threadPool()).thenReturn(threadPool);
|
||||
capturedSearchRequests = new ArrayList<>();
|
||||
capturedContinueScrollIds = new ArrayList<>();
|
||||
jobId = "test-job";
|
||||
@ -269,7 +275,7 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
extractor.setNextResponse(createErrorResponse());
|
||||
|
||||
assertThat(extractor.hasNext(), is(true));
|
||||
expectThrows(IOException.class, () -> extractor.next());
|
||||
expectThrows(IOException.class, extractor::next);
|
||||
}
|
||||
|
||||
public void testExtractionGivenContinueScrollResponseHasError() throws IOException {
|
||||
@ -288,7 +294,7 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
|
||||
extractor.setNextResponse(createErrorResponse());
|
||||
assertThat(extractor.hasNext(), is(true));
|
||||
expectThrows(IOException.class, () -> extractor.next());
|
||||
expectThrows(IOException.class, extractor::next);
|
||||
}
|
||||
|
||||
public void testExtractionGivenInitSearchResponseHasShardFailures() throws IOException {
|
||||
@ -297,7 +303,7 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
extractor.setNextResponse(createResponseWithShardFailures());
|
||||
|
||||
assertThat(extractor.hasNext(), is(true));
|
||||
expectThrows(IOException.class, () -> extractor.next());
|
||||
expectThrows(IOException.class, extractor::next);
|
||||
}
|
||||
|
||||
public void testExtractionGivenInitSearchResponseEncounteredUnavailableShards() throws IOException {
|
||||
@ -306,7 +312,7 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
extractor.setNextResponse(createResponseWithUnavailableShards(1));
|
||||
|
||||
assertThat(extractor.hasNext(), is(true));
|
||||
IOException e = expectThrows(IOException.class, () -> extractor.next());
|
||||
IOException e = expectThrows(IOException.class, extractor::next);
|
||||
assertThat(e.getMessage(), equalTo("[" + jobId + "] Search request encountered [1] unavailable shards"));
|
||||
}
|
||||
|
||||
@ -333,7 +339,7 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
assertThat(output.isPresent(), is(true));
|
||||
// A second failure is not tolerated
|
||||
assertThat(extractor.hasNext(), is(true));
|
||||
expectThrows(IOException.class, () -> extractor.next());
|
||||
expectThrows(IOException.class, extractor::next);
|
||||
}
|
||||
|
||||
public void testResetScollUsesLastResultTimestamp() throws IOException {
|
||||
@ -389,7 +395,7 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
assertEquals(new Long(1400L), extractor.getLastTimestamp());
|
||||
// A second failure is not tolerated
|
||||
assertThat(extractor.hasNext(), is(true));
|
||||
expectThrows(SearchPhaseExecutionException.class, () -> extractor.next());
|
||||
expectThrows(SearchPhaseExecutionException.class, extractor::next);
|
||||
}
|
||||
|
||||
public void testSearchPhaseExecutionExceptionOnInitScroll() throws IOException {
|
||||
@ -398,7 +404,7 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
extractor.setNextResponse(createResponseWithShardFailures());
|
||||
extractor.setNextResponse(createResponseWithShardFailures());
|
||||
|
||||
expectThrows(IOException.class, () -> extractor.next());
|
||||
expectThrows(IOException.class, extractor::next);
|
||||
|
||||
List<String> capturedClearScrollIds = getCapturedClearScrollIds();
|
||||
assertThat(capturedClearScrollIds.isEmpty(), is(true));
|
||||
@ -412,8 +418,8 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
"script2", new Script(ScriptType.INLINE, "painless", "return domainSplit('foo.com', params);", emptyMap()), false);
|
||||
|
||||
List<SearchSourceBuilder.ScriptField> sFields = Arrays.asList(withoutSplit, withSplit);
|
||||
ScrollDataExtractorContext context = new ScrollDataExtractorContext(jobId, extractedFields, indices,
|
||||
types, query, sFields, scrollSize, 1000, 2000);
|
||||
ScrollDataExtractorContext context = new ScrollDataExtractorContext(jobId, extractedFields, indices,
|
||||
types, query, sFields, scrollSize, 1000, 2000, Collections.emptyMap());
|
||||
|
||||
TestDataExtractor extractor = new TestDataExtractor(context);
|
||||
|
||||
@ -460,7 +466,8 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
}
|
||||
|
||||
private ScrollDataExtractorContext createContext(long start, long end) {
|
||||
return new ScrollDataExtractorContext(jobId, extractedFields, indices, types, query, scriptFields, scrollSize, start, end);
|
||||
return new ScrollDataExtractorContext(jobId, extractedFields, indices, types, query, scriptFields, scrollSize, start, end,
|
||||
Collections.emptyMap());
|
||||
}
|
||||
|
||||
private SearchResponse createEmptySearchResponse() {
|
||||
@ -475,9 +482,9 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
for (int i = 0; i < timestamps.size(); i++) {
|
||||
SearchHit hit = new SearchHit(randomInt());
|
||||
Map<String, DocumentField> fields = new HashMap<>();
|
||||
fields.put(extractedFields.timeField(), new DocumentField("time", Arrays.asList(timestamps.get(i))));
|
||||
fields.put("field_1", new DocumentField("field_1", Arrays.asList(field1Values.get(i))));
|
||||
fields.put("field_2", new DocumentField("field_2", Arrays.asList(field2Values.get(i))));
|
||||
fields.put(extractedFields.timeField(), new DocumentField("time", Collections.singletonList(timestamps.get(i))));
|
||||
fields.put("field_1", new DocumentField("field_1", Collections.singletonList(field1Values.get(i))));
|
||||
fields.put("field_2", new DocumentField("field_2", Collections.singletonList(field2Values.get(i))));
|
||||
hit.fields(fields);
|
||||
hits.add(hit);
|
||||
}
|
||||
@ -519,4 +526,4 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
||||
return reader.lines().collect(Collectors.joining("\n"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -16,6 +16,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.test.SecuritySettingsSource;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
import org.elasticsearch.xpack.ml.MachineLearning;
|
||||
import org.elasticsearch.xpack.ml.notifications.Auditor;
|
||||
import org.elasticsearch.xpack.test.rest.XPackRestTestHelper;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
@ -24,8 +25,10 @@ import java.io.BufferedReader;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@ -39,6 +42,8 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
|
||||
basicAuthHeaderValue("x_pack_rest_user", SecuritySettingsSource.TEST_PASSWORD_SECURE_STRING);
|
||||
private static final String BASIC_AUTH_VALUE_ML_ADMIN =
|
||||
basicAuthHeaderValue("ml_admin", SecuritySettingsSource.TEST_PASSWORD_SECURE_STRING);
|
||||
private static final String BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS =
|
||||
basicAuthHeaderValue("ml_admin_plus_data", SecuritySettingsSource.TEST_PASSWORD_SECURE_STRING);
|
||||
|
||||
@Override
|
||||
protected Settings restClientSettings() {
|
||||
@ -50,25 +55,39 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
|
||||
return true;
|
||||
}
|
||||
|
||||
private void setupUser() throws IOException {
|
||||
String password = new String(SecuritySettingsSource.TEST_PASSWORD_SECURE_STRING.getChars());
|
||||
|
||||
// This user has admin rights on machine learning, but (importantly for the tests) no
|
||||
// rights on any of the data indexes
|
||||
String user = "{"
|
||||
+ " \"password\" : \"" + password + "\","
|
||||
+ " \"roles\" : [ \"machine_learning_admin\" ]"
|
||||
private void setupDataAccessRole(String index) throws IOException {
|
||||
String json = "{"
|
||||
+ " \"indices\" : ["
|
||||
+ " { \"names\": [\"" + index + "\"], \"privileges\": [\"read\"] }"
|
||||
+ " ]"
|
||||
+ "}";
|
||||
|
||||
client().performRequest("put", "_xpack/security/user/ml_admin", Collections.emptyMap(),
|
||||
new StringEntity(user, ContentType.APPLICATION_JSON));
|
||||
client().performRequest("put", "_xpack/security/role/test_data_access", Collections.emptyMap(),
|
||||
new StringEntity(json, ContentType.APPLICATION_JSON));
|
||||
}
|
||||
|
||||
private void setupUser(String user, List<String> roles) throws IOException {
|
||||
String password = new String(SecuritySettingsSource.TEST_PASSWORD_SECURE_STRING.getChars());
|
||||
|
||||
String json = "{"
|
||||
+ " \"password\" : \"" + password + "\","
|
||||
+ " \"roles\" : [ " + roles.stream().map(unquoted -> "\"" + unquoted + "\"").collect(Collectors.joining(", ")) + " ]"
|
||||
+ "}";
|
||||
|
||||
client().performRequest("put", "_xpack/security/user/" + user, Collections.emptyMap(),
|
||||
new StringEntity(json, ContentType.APPLICATION_JSON));
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setUpData() throws Exception {
|
||||
setupUser();
|
||||
setupDataAccessRole("network-data");
|
||||
// This user has admin rights on machine learning, but (importantly for the tests) no rights
|
||||
// on any of the data indexes
|
||||
setupUser("ml_admin", Collections.singletonList("machine_learning_admin"));
|
||||
// This user has admin rights on machine learning, and read access to the network-data index
|
||||
setupUser("ml_admin_plus_data", Arrays.asList("machine_learning_admin", "test_data_access"));
|
||||
addAirlineData();
|
||||
addNetworkData();
|
||||
addNetworkData("network-data");
|
||||
}
|
||||
|
||||
private void addAirlineData() throws IOException {
|
||||
@ -221,7 +240,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
|
||||
client().performRequest("post", "_refresh");
|
||||
}
|
||||
|
||||
private void addNetworkData() throws IOException {
|
||||
private void addNetworkData(String index) throws IOException {
|
||||
|
||||
// Create index with source = enabled, doc_values = enabled, stored = false + multi-field
|
||||
String mappings = "{"
|
||||
@ -241,19 +260,19 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
|
||||
+ " }"
|
||||
+ " }"
|
||||
+ "}";
|
||||
client().performRequest("put", "network-data", Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON));
|
||||
client().performRequest("put", index, Collections.emptyMap(), new StringEntity(mappings, ContentType.APPLICATION_JSON));
|
||||
|
||||
String docTemplate = "{\"timestamp\":%d,\"host\":\"%s\",\"network_bytes_out\":%d}";
|
||||
Date date = new Date(1464739200735L);
|
||||
for (int i=0; i<120; i++) {
|
||||
long byteCount = randomNonNegativeLong();
|
||||
String jsonDoc = String.format(Locale.ROOT, docTemplate, date.getTime(), "hostA", byteCount);
|
||||
client().performRequest("post", "network-data/doc", Collections.emptyMap(),
|
||||
client().performRequest("post", index + "/doc", Collections.emptyMap(),
|
||||
new StringEntity(jsonDoc, ContentType.APPLICATION_JSON));
|
||||
|
||||
byteCount = randomNonNegativeLong();
|
||||
jsonDoc = String.format(Locale.ROOT, docTemplate, date.getTime(), "hostB", byteCount);
|
||||
client().performRequest("post", "network-data/doc", Collections.emptyMap(),
|
||||
client().performRequest("post", index + "/doc", Collections.emptyMap(),
|
||||
new StringEntity(jsonDoc, ContentType.APPLICATION_JSON));
|
||||
|
||||
date = new Date(date.getTime() + 10_000);
|
||||
@ -263,7 +282,6 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
|
||||
client().performRequest("post", "_refresh");
|
||||
}
|
||||
|
||||
|
||||
public void testLookbackOnlyWithMixedTypes() throws Exception {
|
||||
new LookbackOnlyTestHelper("test-lookback-only-with-mixed-types", "airline-data")
|
||||
.setShouldSucceedProcessing(true).execute();
|
||||
@ -494,6 +512,52 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
|
||||
assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":240"));
|
||||
}
|
||||
|
||||
public void testLookbackWithoutPermissions() throws Exception {
|
||||
String jobId = "permission-test-network-job";
|
||||
String job = "{\"analysis_config\" :{\"bucket_span\":\"300s\","
|
||||
+ "\"summary_count_field_name\":\"doc_count\","
|
||||
+ "\"detectors\":[{\"function\":\"mean\",\"field_name\":\"bytes-delta\",\"by_field_name\":\"hostname\"}]},"
|
||||
+ "\"data_description\" : {\"time_field\":\"timestamp\"}"
|
||||
+ "}";
|
||||
client().performRequest("put", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId, Collections.emptyMap(),
|
||||
new StringEntity(job, ContentType.APPLICATION_JSON));
|
||||
|
||||
String datafeedId = "datafeed-" + jobId;
|
||||
String aggregations =
|
||||
"{\"hostname\": {\"terms\" : {\"field\": \"host.keyword\", \"size\":10},"
|
||||
+ "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"interval\":\"5s\"},"
|
||||
+ "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}},"
|
||||
+ "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}},"
|
||||
+ "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}";
|
||||
|
||||
// At the time we create the datafeed, the user still has read access to the network-data index
|
||||
new DatafeedBuilder(datafeedId, jobId, "network-data", "doc")
|
||||
.setAggregations(aggregations)
|
||||
.setChunkingTimespan("300s")
|
||||
.setAuthHeader(BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS)
|
||||
.build();
|
||||
|
||||
// Change the role so that the user can no longer access network-data
|
||||
setupDataAccessRole("some-other-data");
|
||||
|
||||
openJob(client(), jobId);
|
||||
|
||||
startDatafeedAndWaitUntilStopped(datafeedId, BASIC_AUTH_VALUE_ML_ADMIN_WITH_SOME_DATA_ACCESS);
|
||||
waitUntilJobIsClosed(jobId);
|
||||
Response jobStatsResponse = client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats");
|
||||
String jobStatsResponseAsString = responseEntityToString(jobStatsResponse);
|
||||
// We expect that no data made it through to the job
|
||||
assertThat(jobStatsResponseAsString, containsString("\"input_record_count\":0"));
|
||||
assertThat(jobStatsResponseAsString, containsString("\"processed_record_count\":0"));
|
||||
|
||||
// There should be a notification saying that there was a problem extracting data
|
||||
client().performRequest("post", "_refresh");
|
||||
Response notificationsResponse = client().performRequest("get", Auditor.NOTIFICATIONS_INDEX + "/_search?q=job_id:" + jobId);
|
||||
String notificationsResponseAsString = responseEntityToString(notificationsResponse);
|
||||
assertThat(notificationsResponseAsString, containsString("\"message\":\"Datafeed is encountering errors extracting data: " +
|
||||
"action [indices:data/read/search] is unauthorized for user [ml_admin_plus_data]\""));
|
||||
}
|
||||
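// Editor's note (hedged): the test above pins down the intended security model: the datafeed
// remembers the permissions of the user who created it, so once that user's role loses access
// to network-data the lookback extracts nothing and a notification about the authorization
// failure is written. A rough conceptual sketch of capturing the creating request's headers
// when the datafeed is stored (the filtering and names are assumptions, not the actual
// PutDatafeedAction code):
Map<String, String> securityHeaders = new HashMap<>(threadPool.getThreadContext().getHeaders());
securityHeaders.keySet().retainAll(Arrays.asList("Authorization", "es-security-runas-user")); // assumed filter
mlMetadataBuilder.putDatafeed(datafeedConfig, securityHeaders);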
|
||||
public void testLookbackWithPipelineBucketAgg() throws Exception {
|
||||
String jobId = "pipeline-bucket-agg-job";
|
||||
String job = "{\"analysis_config\" :{\"bucket_span\":\"1h\","
|
||||
@ -665,10 +729,14 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
|
||||
assertThat(jobStatsResponseAsString, containsString("\"missing_field_count\":0"));
|
||||
}
|
||||
}
|
||||
|
||||
private void startDatafeedAndWaitUntilStopped(String datafeedId) throws Exception {
|
||||
startDatafeedAndWaitUntilStopped(datafeedId, BASIC_AUTH_VALUE_SUPER_USER);
|
||||
}
|
||||
|
||||
private void startDatafeedAndWaitUntilStopped(String datafeedId, String authHeader) throws Exception {
|
||||
Response startDatafeedRequest = client().performRequest("post",
|
||||
MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z&end=2016-06-02T00:00:00Z");
|
||||
MachineLearning.BASE_PATH + "datafeeds/" + datafeedId + "/_start?start=2016-06-01T00:00:00Z&end=2016-06-02T00:00:00Z",
|
||||
new BasicHeader("Authorization", authHeader));
|
||||
assertThat(startDatafeedRequest.getStatusLine().getStatusCode(), equalTo(200));
|
||||
assertThat(responseEntityToString(startDatafeedRequest), equalTo("{\"started\":true}"));
|
||||
assertBusy(() -> {
|
||||
@ -763,9 +831,9 @@ public class DatafeedJobsRestIT extends ESRestTestCase {
|
||||
}
|
||||
|
||||
DatafeedBuilder setChunkingTimespan(String timespan) {
|
||||
chunkingTimespan = timespan;
|
||||
return this;
|
||||
}
|
||||
chunkingTimespan = timespan;
|
||||
return this;
|
||||
}
|
||||
|
||||
Response build() throws IOException {
|
||||
String datafeedConfig = "{"
|
||||
|
@ -278,7 +278,8 @@ public class JobProviderIT extends XPackSingleNodeTestCase {
|
||||
for (SpecialEvent event : events) {
|
||||
IndexRequest indexRequest = new IndexRequest(MlMetaIndex.INDEX_NAME, MlMetaIndex.TYPE, event.documentId());
|
||||
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
|
||||
indexRequest.source(event.toXContent(builder, ToXContent.EMPTY_PARAMS));
|
||||
ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(MlMetaIndex.INCLUDE_TYPE_KEY, "true"));
|
||||
indexRequest.source(event.toXContent(builder, params));
|
||||
bulkRequest.add(indexRequest);
|
||||
}
|
||||
}
|
||||
|
@ -10,11 +10,15 @@ import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.xpack.ml.action.OpenJobAction.JobTask;
|
||||
import org.elasticsearch.xpack.ml.calendars.SpecialEvent;
|
||||
import org.elasticsearch.xpack.ml.calendars.SpecialEventTests;
|
||||
import org.elasticsearch.xpack.ml.job.config.AnalysisConfig;
|
||||
import org.elasticsearch.xpack.ml.job.config.DataDescription;
|
||||
import org.elasticsearch.xpack.ml.job.config.DetectionRule;
|
||||
import org.elasticsearch.xpack.ml.job.config.Detector;
|
||||
import org.elasticsearch.xpack.ml.job.config.Job;
|
||||
import org.elasticsearch.xpack.ml.job.config.JobUpdate;
|
||||
import org.elasticsearch.xpack.ml.job.config.RuleCondition;
|
||||
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;
|
||||
import org.elasticsearch.xpack.ml.job.process.DataCountsReporter;
|
||||
import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutoDetectResultProcessor;
|
||||
@ -23,13 +27,17 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams;
|
||||
import org.elasticsearch.xpack.ml.job.process.autodetect.params.FlushJobParams;
|
||||
import org.elasticsearch.xpack.ml.job.process.autodetect.params.TimeRange;
|
||||
import org.junit.Before;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
import org.mockito.InOrder;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.time.Duration;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
@ -48,6 +56,7 @@ import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.never;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.verifyNoMoreInteractions;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class AutodetectCommunicatorTests extends ESTestCase {
|
||||
@ -65,10 +74,52 @@ public class AutodetectCommunicatorTests extends ESTestCase {
|
||||
try (AutodetectCommunicator communicator = createAutodetectCommunicator(process, mock(AutoDetectResultProcessor.class))) {
|
||||
communicator.writeToJob(new ByteArrayInputStream(new byte[0]),
|
||||
randomFrom(XContentType.values()), params, (dataCounts, e) -> {});
|
||||
Mockito.verify(process).writeResetBucketsControlMessage(params);
|
||||
verify(process).writeResetBucketsControlMessage(params);
|
||||
}
|
||||
}
|
||||
|
||||
public void testWriteUpdateProcessMessage() throws IOException {
|
||||
AutodetectProcess process = mockAutodetectProcessWithOutputStream();
|
||||
when(process.isReady()).thenReturn(true);
|
||||
AutodetectCommunicator communicator = createAutodetectCommunicator(process, mock(AutoDetectResultProcessor.class));
|
||||
|
||||
List<RuleCondition> conditions = Collections.singletonList(
|
||||
RuleCondition.createCategorical("foo", "bar"));
|
||||
|
||||
List<JobUpdate.DetectorUpdate> detectorUpdates = Collections.singletonList(
|
||||
new JobUpdate.DetectorUpdate(0, "updated description",
|
||||
Collections.singletonList(new DetectionRule.Builder(conditions).build())));
|
||||
|
||||
UpdateParams updateParams = new UpdateParams(null, detectorUpdates, true);
|
||||
List<SpecialEvent> events = Collections.singletonList(SpecialEventTests.createSpecialEvent());
|
||||
|
||||
communicator.writeUpdateProcessMessage(updateParams, events, ((aVoid, e) -> {}));
|
||||
|
||||
// There are 2 detectors; both will be updated with the rule for the special event.
|
||||
// The first detector additionally has its own update rule
|
||||
ArgumentCaptor<List> captor = ArgumentCaptor.forClass(List.class);
|
||||
InOrder inOrder = Mockito.inOrder(process);
|
||||
inOrder.verify(process).writeUpdateDetectorRulesMessage(eq(0), captor.capture());
|
||||
assertEquals(2, captor.getValue().size());
|
||||
inOrder.verify(process).writeUpdateDetectorRulesMessage(eq(1), captor.capture());
|
||||
assertEquals(1, captor.getValue().size());
|
||||
verify(process).isProcessAlive();
|
||||
verifyNoMoreInteractions(process);
|
||||
|
||||
|
||||
// This time there is a single detector update and no special events
|
||||
detectorUpdates = Collections.singletonList(
|
||||
new JobUpdate.DetectorUpdate(1, "updated description",
|
||||
Collections.singletonList(new DetectionRule.Builder(conditions).build())));
|
||||
updateParams = new UpdateParams(null, detectorUpdates, true);
|
||||
communicator.writeUpdateProcessMessage(updateParams, Collections.emptyList(), ((aVoid, e) -> {}));
|
||||
|
||||
inOrder = Mockito.inOrder(process);
|
||||
inOrder.verify(process).writeUpdateDetectorRulesMessage(eq(1), captor.capture());
|
||||
assertEquals(1, captor.getValue().size());
|
||||
verify(process, times(2)).isProcessAlive();
|
||||
}
|
||||
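// Editor's note (hedged): the assertions above encode the expected fan-out: detector 0 receives
// two rules (its own updated rule plus the one derived from the special event), while detector 1
// receives only the special-event rule; in the second round there is no event, so detector 1
// gets just its own rule. A sketch of how the per-detector rule list might be assembled
// (accessor and conversion names are assumptions, not the actual communicator code):
private static List<DetectionRule> rulesFor(int detectorIndex,
                                            List<JobUpdate.DetectorUpdate> detectorUpdates,
                                            List<SpecialEvent> events) {
    List<DetectionRule> rules = new ArrayList<>();
    for (JobUpdate.DetectorUpdate update : detectorUpdates) {
        if (update.getDetectorIndex() == detectorIndex) {   // assumed accessor
            rules.addAll(update.getRules());                // assumed accessor
        }
    }
    for (SpecialEvent event : events) {
        rules.add(event.toDetectionRule());                 // assumed conversion
    }
    return rules;
}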
|
||||
public void testFlushJob() throws IOException {
|
||||
AutodetectProcess process = mockAutodetectProcessWithOutputStream();
|
||||
when(process.isProcessAlive()).thenReturn(true);
|
||||
@ -175,9 +226,10 @@ public class AutodetectCommunicatorTests extends ESTestCase {
|
||||
DataDescription.Builder dd = new DataDescription.Builder();
|
||||
dd.setTimeField("time_field");
|
||||
|
||||
Detector.Builder detector = new Detector.Builder("metric", "value");
|
||||
detector.setByFieldName("host-metric");
|
||||
AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(detector.build()));
|
||||
Detector.Builder metric = new Detector.Builder("metric", "value");
|
||||
metric.setByFieldName("host-metric");
|
||||
Detector.Builder count = new Detector.Builder("count", null);
|
||||
AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Arrays.asList(metric.build(), count.build()));
|
||||
|
||||
builder.setDataDescription(dd);
|
||||
builder.setAnalysisConfig(ac);
|
||||
|
@ -6,6 +6,7 @@
|
||||
package org.elasticsearch.xpack.monitoring;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
@ -48,6 +49,7 @@ public class MonitoringServiceTests extends ESTestCase {
|
||||
final Monitoring monitoring = new Monitoring(Settings.EMPTY, licenseState);
|
||||
clusterSettings = new ClusterSettings(Settings.EMPTY, new HashSet<>(monitoring.getSettings()));
|
||||
when(clusterService.getClusterSettings()).thenReturn(clusterSettings);
|
||||
when(clusterService.state()).thenReturn(mock(ClusterState.class));
|
||||
}
|
||||
|
||||
@After
|
||||
@ -59,7 +61,7 @@ public class MonitoringServiceTests extends ESTestCase {
|
||||
}
|
||||
|
||||
public void testIsMonitoringActive() throws Exception {
|
||||
monitoringService = new MonitoringService(Settings.EMPTY, clusterSettings, threadPool, emptySet(), new CountingExporter());
|
||||
monitoringService = new MonitoringService(Settings.EMPTY, clusterService, threadPool, emptySet(), new CountingExporter());
|
||||
|
||||
monitoringService.start();
|
||||
assertBusy(() -> assertTrue(monitoringService.isStarted()));
|
||||
@ -82,7 +84,7 @@ public class MonitoringServiceTests extends ESTestCase {
|
||||
Settings settings = Settings.builder().put(MonitoringService.INTERVAL.getKey(), TimeValue.MINUS_ONE).build();
|
||||
|
||||
CountingExporter exporter = new CountingExporter();
|
||||
monitoringService = new MonitoringService(settings, clusterSettings, threadPool, emptySet(), exporter);
|
||||
monitoringService = new MonitoringService(settings, clusterService, threadPool, emptySet(), exporter);
|
||||
|
||||
monitoringService.start();
|
||||
assertBusy(() -> assertTrue(monitoringService.isStarted()));
|
||||
@ -105,7 +107,7 @@ public class MonitoringServiceTests extends ESTestCase {
|
||||
final BlockingExporter exporter = new BlockingExporter(latch);
|
||||
|
||||
Settings settings = Settings.builder().put(MonitoringService.INTERVAL.getKey(), MonitoringService.MIN_INTERVAL).build();
|
||||
monitoringService = new MonitoringService(settings, clusterSettings, threadPool, emptySet(), exporter);
|
||||
monitoringService = new MonitoringService(settings, clusterService, threadPool, emptySet(), exporter);
|
||||
|
||||
monitoringService.start();
|
||||
assertBusy(() -> assertTrue(monitoringService.isStarted()));
|
||||
|
@ -59,7 +59,7 @@ public abstract class BaseCollectorTestCase extends ESTestCase {
|
||||
|
||||
protected void whenLocalNodeElectedMaster(final boolean electedMaster) {
|
||||
when(clusterService.state()).thenReturn(clusterState);
|
||||
when(clusterState.nodes()).thenReturn(nodes);
|
||||
when(clusterState.getNodes()).thenReturn(nodes);
|
||||
when(nodes.isLocalNodeElectedMaster()).thenReturn(electedMaster);
|
||||
}
|
||||
|
||||
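// Editor's note (hedged): the monitoring collector tests below all move from the no-argument
// shouldCollect() and two-argument doCollect(node, interval) to shouldCollect(isElectedMaster)
// and doCollect(node, interval, clusterState): the master check and the cluster state now
// appear to be resolved once by the caller and handed in, rather than each collector asking
// ClusterService itself. A minimal sketch of the presumed new shape (illustrative, not the
// actual Collector base class):
protected boolean shouldCollect(boolean isElectedMaster) {
    // only the elected master collects, and only when the license permits monitoring
    return isElectedMaster && licenseState.isMonitoringAllowed();
}
protected Collection<MonitoringDoc> doCollect(MonitoringDoc.Node node, long interval, ClusterState clusterState) {
    String clusterUuid = clusterState.metaData().clusterUUID();   // shared lookup used by the collectors
    return Collections.singletonList(createMonitoringDoc(clusterUuid, node, interval)); // assumed factory
}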
|
@ -66,24 +66,17 @@ public class ClusterStatsCollectorTests extends BaseCollectorTestCase {
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsFalseIfNotMaster() {
|
||||
// this controls the blockage
|
||||
whenLocalNodeElectedMaster(false);
|
||||
|
||||
final ClusterStatsCollector collector =
|
||||
new ClusterStatsCollector(Settings.EMPTY, clusterService, licenseState, client, licenseService);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
verify(nodes).isLocalNodeElectedMaster();
|
||||
assertThat(collector.shouldCollect(false), is(false));
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsTrue() {
|
||||
whenLocalNodeElectedMaster(true);
|
||||
|
||||
final ClusterStatsCollector collector =
|
||||
new ClusterStatsCollector(Settings.EMPTY, clusterService, licenseState, client, licenseService);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(true));
|
||||
verify(nodes).isLocalNodeElectedMaster();
|
||||
assertThat(collector.shouldCollect(true), is(true));
|
||||
}
|
||||
|
||||
public void testDoAPMIndicesExistReturnsBasedOnIndices() {
|
||||
@ -219,7 +212,7 @@ public class ClusterStatsCollectorTests extends BaseCollectorTestCase {
|
||||
|
||||
final long interval = randomNonNegativeLong();
|
||||
|
||||
final Collection<MonitoringDoc> results = collector.doCollect(node, interval);
|
||||
final Collection<MonitoringDoc> results = collector.doCollect(node, interval, clusterState);
|
||||
assertEquals(1, results.size());
|
||||
|
||||
final MonitoringDoc monitoringDoc = results.iterator().next();
|
||||
@ -254,7 +247,8 @@ public class ClusterStatsCollectorTests extends BaseCollectorTestCase {
|
||||
assertThat(document.getClusterState().stateUUID(), equalTo(clusterState.stateUUID()));
|
||||
|
||||
verify(clusterService, times(1)).getClusterName();
|
||||
verify(clusterService, times(2)).state();
|
||||
verify(clusterState, times(1)).metaData();
|
||||
verify(metaData, times(1)).clusterUUID();
|
||||
verify(licenseService, times(1)).getLicense();
|
||||
verify(clusterAdminClient).prepareClusterStats();
|
||||
verify(client).execute(same(XPackUsageAction.INSTANCE), any(XPackUsageRequest.class));
|
||||
|
@ -44,6 +44,7 @@ import static org.mockito.Matchers.eq;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.spy;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@ -52,35 +53,30 @@ public class IndexRecoveryCollectorTests extends BaseCollectorTestCase {
|
||||
public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() {
|
||||
// this controls the blockage
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(false);
|
||||
whenLocalNodeElectedMaster(randomBoolean());
|
||||
final boolean isElectedMaster = randomBoolean();
|
||||
whenLocalNodeElectedMaster(isElectedMaster);
|
||||
|
||||
final IndexRecoveryCollector collector = new IndexRecoveryCollector(Settings.EMPTY, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
assertThat(collector.shouldCollect(isElectedMaster), is(false));
|
||||
if (isElectedMaster) {
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
}
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsFalseIfNotMaster() {
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(true);
|
||||
// this controls the blockage
|
||||
whenLocalNodeElectedMaster(false);
|
||||
|
||||
final IndexRecoveryCollector collector = new IndexRecoveryCollector(Settings.EMPTY, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
verify(nodes).isLocalNodeElectedMaster();
|
||||
assertThat(collector.shouldCollect(false), is(false));
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsTrue() {
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(true);
|
||||
whenLocalNodeElectedMaster(true);
|
||||
|
||||
final IndexRecoveryCollector collector = new IndexRecoveryCollector(Settings.EMPTY, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(true));
|
||||
assertThat(collector.shouldCollect(true), is(true));
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
verify(nodes).isLocalNodeElectedMaster();
|
||||
}
|
||||
|
||||
public void testDoCollect() throws Exception {
|
||||
@ -157,8 +153,12 @@ public class IndexRecoveryCollectorTests extends BaseCollectorTestCase {
|
||||
|
||||
final long interval = randomNonNegativeLong();
|
||||
|
||||
final Collection<MonitoringDoc> results = collector.doCollect(node, interval);
|
||||
final Collection<MonitoringDoc> results = collector.doCollect(node, interval, clusterState);
|
||||
verify(indicesAdminClient).prepareRecoveries();
|
||||
if (recoveryStates.isEmpty() == false) {
|
||||
verify(clusterState).metaData();
|
||||
verify(metaData).clusterUUID();
|
||||
}
|
||||
|
||||
if (nbRecoveries == 0) {
|
||||
assertEquals(0, results.size());
|
||||
|
@ -36,6 +36,7 @@ import static org.mockito.Matchers.eq;
|
||||
import static org.mockito.Mockito.doReturn;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.spy;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
@ -44,35 +45,30 @@ public class IndexStatsCollectorTests extends BaseCollectorTestCase {
|
||||
public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() {
|
||||
// this controls the blockage
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(false);
|
||||
whenLocalNodeElectedMaster(randomBoolean());
|
||||
final boolean isElectedMaster = randomBoolean();
|
||||
whenLocalNodeElectedMaster(isElectedMaster);
|
||||
|
||||
final IndexStatsCollector collector = new IndexStatsCollector(Settings.EMPTY, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
assertThat(collector.shouldCollect(isElectedMaster), is(false));
|
||||
if (isElectedMaster) {
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
}
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsFalseIfNotMaster() {
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(true);
|
||||
// this controls the blockage
|
||||
whenLocalNodeElectedMaster(false);
|
||||
|
||||
final IndexStatsCollector collector = new IndexStatsCollector(Settings.EMPTY, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
verify(nodes).isLocalNodeElectedMaster();
|
||||
assertThat(collector.shouldCollect(false), is(false));
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsTrue() {
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(true);
|
||||
whenLocalNodeElectedMaster(true);
|
||||
|
||||
final IndexStatsCollector collector = new IndexStatsCollector(Settings.EMPTY, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(true));
|
||||
assertThat(collector.shouldCollect(true), is(true));
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
verify(nodes).isLocalNodeElectedMaster();
|
||||
}
|
||||
|
||||
public void testDoCollect() throws Exception {
|
||||
@ -133,8 +129,11 @@ public class IndexStatsCollectorTests extends BaseCollectorTestCase {
|
||||
|
||||
final long interval = randomNonNegativeLong();
|
||||
|
||||
final Collection<MonitoringDoc> results = collector.doCollect(node, interval);
|
||||
final Collection<MonitoringDoc> results = collector.doCollect(node, interval, clusterState);
|
||||
verify(indicesAdminClient).prepareStats();
|
||||
verify(clusterState, times(1 + indices)).metaData();
|
||||
verify(clusterState, times(indices)).routingTable();
|
||||
verify(metaData).clusterUUID();
|
||||
|
||||
assertEquals(1 + indices, results.size());
|
||||
|
||||
|
@ -43,6 +43,8 @@ public class JobStatsCollectorTests extends BaseCollectorTestCase {
|
||||
public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() {
|
||||
final Settings settings = randomFrom(mlEnabledSettings(), mlDisabledSettings());
|
||||
final boolean mlAllowed = randomBoolean();
|
||||
final boolean isElectedMaster = randomBoolean();
|
||||
whenLocalNodeElectedMaster(isElectedMaster);
|
||||
|
||||
// this controls the blockage
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(false);
|
||||
@ -50,9 +52,10 @@ public class JobStatsCollectorTests extends BaseCollectorTestCase {
|
||||
|
||||
final JobStatsCollector collector = new JobStatsCollector(settings, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
assertThat(collector.shouldCollect(isElectedMaster), is(false));
|
||||
if (isElectedMaster) {
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
}
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsFalseIfNotMaster() {
|
||||
@ -62,13 +65,11 @@ public class JobStatsCollectorTests extends BaseCollectorTestCase {
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean());
|
||||
when(licenseState.isMachineLearningAllowed()).thenReturn(randomBoolean());
|
||||
// this controls the blockage
|
||||
whenLocalNodeElectedMaster(false);
|
||||
final boolean isElectedMaster = false;
|
||||
|
||||
final JobStatsCollector collector = new JobStatsCollector(settings, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
assertThat(collector.shouldCollect(isElectedMaster), is(false));
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsFalseIfMLIsDisabled() {
|
||||
@ -77,13 +78,17 @@ public class JobStatsCollectorTests extends BaseCollectorTestCase {
|
||||
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean());
|
||||
when(licenseState.isMachineLearningAllowed()).thenReturn(randomBoolean());
|
||||
whenLocalNodeElectedMaster(randomBoolean());
|
||||
|
||||
final boolean isElectedMaster = randomBoolean();
|
||||
whenLocalNodeElectedMaster(isElectedMaster);
|
||||
|
||||
final JobStatsCollector collector = new JobStatsCollector(settings, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
assertThat(collector.shouldCollect(isElectedMaster), is(false));
|
||||
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
if (isElectedMaster) {
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
}
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsFalseIfMLIsNotAllowed() {
|
||||
@ -92,13 +97,16 @@ public class JobStatsCollectorTests extends BaseCollectorTestCase {
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(randomBoolean());
|
||||
// this controls the blockage
|
||||
when(licenseState.isMachineLearningAllowed()).thenReturn(false);
|
||||
whenLocalNodeElectedMaster(randomBoolean());
|
||||
final boolean isElectedMaster = randomBoolean();
|
||||
whenLocalNodeElectedMaster(isElectedMaster);
|
||||
|
||||
final JobStatsCollector collector = new JobStatsCollector(settings, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(false));
|
||||
assertThat(collector.shouldCollect(isElectedMaster), is(false));
|
||||
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
if (isElectedMaster) {
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
}
|
||||
}
|
||||
|
||||
public void testShouldCollectReturnsTrue() {
|
||||
@ -106,18 +114,19 @@ public class JobStatsCollectorTests extends BaseCollectorTestCase {
|
||||
|
||||
when(licenseState.isMonitoringAllowed()).thenReturn(true);
|
||||
when(licenseState.isMachineLearningAllowed()).thenReturn(true);
|
||||
whenLocalNodeElectedMaster(true);
|
||||
final boolean isElectedMaster = true;
|
||||
|
||||
final JobStatsCollector collector = new JobStatsCollector(settings, clusterService, licenseState, client);
|
||||
|
||||
assertThat(collector.shouldCollect(), is(true));
|
||||
assertThat(collector.shouldCollect(isElectedMaster), is(true));
|
||||
|
||||
verify(licenseState).isMonitoringAllowed();
|
||||
}
|
||||
|
||||
public void testDoCollect() throws Exception {
|
||||
final MetaData metaData = mock(MetaData.class);
|
||||
final String clusterUuid = randomAlphaOfLength(5);
|
||||
whenClusterStateWithUUID(clusterUuid);
|
||||
|
||||
final MonitoringDoc.Node node = randomMonitoringNode(random());
|
||||
final MachineLearningClient client = mock(MachineLearningClient.class);
|
||||
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
|
||||
@ -125,10 +134,6 @@ public class JobStatsCollectorTests extends BaseCollectorTestCase {
|
||||
final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120));
withCollectionTimeout(JobStatsCollector.JOB_STATS_TIMEOUT, timeout);

when(clusterService.state()).thenReturn(clusterState);
when(clusterState.metaData()).thenReturn(metaData);
when(metaData.clusterUUID()).thenReturn(clusterUuid);

final JobStatsCollector collector = new JobStatsCollector(Settings.EMPTY, clusterService, licenseState, client, threadContext);
assertEquals(timeout, collector.getCollectionTimeout());

@ -143,7 +148,9 @@ public class JobStatsCollectorTests extends BaseCollectorTestCase {

final long interval = randomNonNegativeLong();

final List<MonitoringDoc> monitoringDocs = collector.doCollect(node, interval);
final List<MonitoringDoc> monitoringDocs = collector.doCollect(node, interval, clusterState);
verify(clusterState).metaData();
verify(metaData).clusterUUID();

assertThat(monitoringDocs, hasSize(jobStats.size()));

@ -40,21 +40,24 @@ public class NodeStatsCollectorTests extends BaseCollectorTestCase {
public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() {
// this controls the blockage
when(licenseState.isMonitoringAllowed()).thenReturn(false);
whenLocalNodeElectedMaster(randomBoolean());
final boolean isElectedMaster = randomBoolean();
whenLocalNodeElectedMaster(isElectedMaster);

final NodeStatsCollector collector = new NodeStatsCollector(Settings.EMPTY, clusterService, licenseState, client);

assertThat(collector.shouldCollect(), is(false));
verify(licenseState).isMonitoringAllowed();
assertThat(collector.shouldCollect(isElectedMaster), is(false));
if (isElectedMaster) {
verify(licenseState).isMonitoringAllowed();
}
}

public void testShouldCollectReturnsTrue() {
when(licenseState.isMonitoringAllowed()).thenReturn(true);
whenLocalNodeElectedMaster(true);
final boolean isElectedMaster = true;

final NodeStatsCollector collector = new NodeStatsCollector(Settings.EMPTY, clusterService, licenseState, client);

assertThat(collector.shouldCollect(), is(true));
assertThat(collector.shouldCollect(isElectedMaster), is(true));
verify(licenseState).isMonitoringAllowed();
}

@ -77,7 +80,7 @@ public class NodeStatsCollectorTests extends BaseCollectorTestCase {
assertEquals(timeout, collector.getCollectionTimeout());

final FailedNodeException e = expectThrows(FailedNodeException.class, () ->
collector.doCollect(randomMonitoringNode(random()), randomNonNegativeLong()));
collector.doCollect(randomMonitoringNode(random()), randomNonNegativeLong(), clusterState));
assertEquals(exception, e);
}

@ -112,7 +115,10 @@ public class NodeStatsCollectorTests extends BaseCollectorTestCase {

final long interval = randomNonNegativeLong();

final Collection<MonitoringDoc> results = collector.doCollect(node, interval);
final Collection<MonitoringDoc> results = collector.doCollect(node, interval, clusterState);
verify(clusterState).metaData();
verify(metaData).clusterUUID();

assertEquals(1, results.size());

final MonitoringDoc monitoringDoc = results.iterator().next();

@ -45,12 +45,15 @@ public class ShardsCollectorTests extends BaseCollectorTestCase {
public void testShouldCollectReturnsFalseIfMonitoringNotAllowed() {
// this controls the blockage
when(licenseState.isMonitoringAllowed()).thenReturn(false);
whenLocalNodeElectedMaster(randomBoolean());
final boolean isElectedMaster = randomBoolean();
whenLocalNodeElectedMaster(isElectedMaster);

final ShardsCollector collector = new ShardsCollector(Settings.EMPTY, clusterService, licenseState);

assertThat(collector.shouldCollect(), is(false));
verify(licenseState).isMonitoringAllowed();
assertThat(collector.shouldCollect(isElectedMaster), is(false));
if (isElectedMaster) {
verify(licenseState).isMonitoringAllowed();
}
}

public void testShouldCollectReturnsFalseIfNotMaster() {
@ -60,9 +63,7 @@ public class ShardsCollectorTests extends BaseCollectorTestCase {

final ShardsCollector collector = new ShardsCollector(Settings.EMPTY, clusterService, licenseState);

assertThat(collector.shouldCollect(), is(false));
verify(licenseState).isMonitoringAllowed();
verify(nodes).isLocalNodeElectedMaster();
assertThat(collector.shouldCollect(false), is(false));
}

public void testShouldCollectReturnsTrue() {
@ -71,20 +72,16 @@ public class ShardsCollectorTests extends BaseCollectorTestCase {

final ShardsCollector collector = new ShardsCollector(Settings.EMPTY, clusterService, licenseState);

assertThat(collector.shouldCollect(), is(true));
assertThat(collector.shouldCollect(true), is(true));
verify(licenseState).isMonitoringAllowed();
verify(nodes).isLocalNodeElectedMaster();
}

public void testDoCollectWhenNoClusterState() throws Exception {
when(clusterService.state()).thenReturn(null);

final ShardsCollector collector = new ShardsCollector(Settings.EMPTY, clusterService, licenseState);

final Collection<MonitoringDoc> results = collector.doCollect(randomMonitoringNode(random()), randomNonNegativeLong());
final Collection<MonitoringDoc> results = collector.doCollect(randomMonitoringNode(random()), randomNonNegativeLong(), null);
assertThat(results, notNullValue());
assertThat(results.size(), equalTo(0));
verify(clusterService).state();
}

public void testDoCollect() throws Exception {
@ -114,7 +111,10 @@ public class ShardsCollectorTests extends BaseCollectorTestCase {

final long interval = randomNonNegativeLong();

final Collection<MonitoringDoc> results = collector.doCollect(node, interval);
final Collection<MonitoringDoc> results = collector.doCollect(node, interval, clusterState);
verify(clusterState).metaData();
verify(metaData).clusterUUID();

assertThat(results, notNullValue());
assertThat(results.size(), equalTo((indices != NONE) ? routingTable.allShards().size() : 0));
@ -1,147 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.input.http;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.transport.Netty4Plugin;
import org.elasticsearch.xpack.watcher.client.WatcherClient;
import org.elasticsearch.xpack.watcher.common.http.HttpRequestTemplate;
import org.elasticsearch.xpack.watcher.common.text.TextTemplate;
import org.elasticsearch.xpack.watcher.condition.CompareCondition;
import org.elasticsearch.xpack.watcher.history.HistoryStore;
import org.elasticsearch.xpack.watcher.support.xcontent.XContentSource;
import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase;
import org.elasticsearch.xpack.watcher.transport.actions.put.PutWatchResponse;
import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule;

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;

import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.xpack.watcher.actions.ActionBuilders.loggingAction;
import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder;
import static org.elasticsearch.xpack.watcher.input.InputBuilders.httpInput;
import static org.elasticsearch.xpack.watcher.test.WatcherTestUtils.xContentSource;
import static org.elasticsearch.xpack.watcher.trigger.TriggerBuilders.schedule;
import static org.elasticsearch.xpack.watcher.trigger.schedule.Schedules.interval;
import static org.hamcrest.Matchers.equalTo;

@TestLogging("org.elasticsearch.xpack.watcher:DEBUG,org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE")
public class HttpInputIntegrationTests extends AbstractWatcherIntegrationTestCase {

@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(NetworkModule.HTTP_ENABLED.getKey(), true)
.build();
}

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
ArrayList<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins());
plugins.add(Netty4Plugin.class); // for http
return plugins;
}

public void testHttpInput() throws Exception {
createIndex("index");
client().prepareIndex("index", "type", "id").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();

InetSocketAddress address = internalCluster().httpAddresses()[0];
watcherClient().preparePutWatch("_name")
.setSource(watchBuilder()
.trigger(schedule(interval("5s")))
.input(httpInput(HttpRequestTemplate.builder(address.getHostString(), address.getPort())
.path("/index/_search")
.body(jsonBuilder().startObject().field("size", 1).endObject().string())
.putHeader("Content-Type", new TextTemplate("application/json"))))
.condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L))
.addAction("_id", loggingAction("anything")))
.get();

timeWarp().trigger("_name");
refresh();
assertWatchWithMinimumPerformedActionsCount("_name", 1, false);
}

public void testHttpInputClusterStats() throws Exception {
InetSocketAddress address = internalCluster().httpAddresses()[0];
PutWatchResponse putWatchResponse = watcherClient().preparePutWatch("_name")
.setSource(watchBuilder()
.trigger(schedule(interval("1s")))
.input(httpInput(HttpRequestTemplate.builder(address.getHostString(), address.getPort()).path("/_cluster/stats")))
.condition(new CompareCondition("ctx.payload.nodes.count.total", CompareCondition.Op.GTE, 1L))
.addAction("_id", loggingAction("anything")))
.get();

assertTrue(putWatchResponse.isCreated());
timeWarp().trigger("_name");
refresh();
assertWatchWithMinimumPerformedActionsCount("_name", 1, false);
}

public void testInputFiltering() throws Exception {
WatcherClient watcherClient = watcherClient();
createIndex("idx");
// Have a sample document in the index, the watch is going to evaluate
client().prepareIndex("idx", "type").setSource("field", "value").get();
refresh();

InetSocketAddress address = internalCluster().httpAddresses()[0];
XContentBuilder body = jsonBuilder().prettyPrint().startObject()
.field("query").value(termQuery("field", "value"))
.endObject();
HttpRequestTemplate.Builder requestBuilder = HttpRequestTemplate.builder(address.getHostString(), address.getPort())
.path(new TextTemplate("/idx/_search"))
.body(body.string());

watcherClient.preparePutWatch("_name1")
.setSource(watchBuilder()
.trigger(schedule(interval(10, IntervalSchedule.Interval.Unit.SECONDS)))
.input(httpInput(requestBuilder).extractKeys("hits.total"))
.condition(new CompareCondition("ctx.payload.hits.total", CompareCondition.Op.EQ, 1L)))
.get();

// in this watcher the condition will fail, because max_score isn't extracted, only total:
watcherClient.preparePutWatch("_name2")
.setSource(watchBuilder()
.trigger(schedule(interval(10, IntervalSchedule.Interval.Unit.SECONDS)))
.input(httpInput(requestBuilder).extractKeys("hits.total"))
.condition(new CompareCondition("ctx.payload.hits.max_score", CompareCondition.Op.GTE, 0L)))
.get();

timeWarp().trigger("_name1");
timeWarp().trigger("_name2");
refresh();

assertWatchWithMinimumPerformedActionsCount("_name1", 1, false);
assertWatchWithNoActionNeeded("_name2", 1);

// Check that the input result payload has been filtered
refresh();
SearchResponse searchResponse = client().prepareSearch(HistoryStore.INDEX_PREFIX_WITH_TEMPLATE + "*")
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
.setQuery(matchQuery("watch_id", "_name1"))
.setSize(1)
.get();
assertHitCount(searchResponse, 1);
XContentSource source = xContentSource(searchResponse.getHits().getAt(0).getSourceRef());
assertThat(source.getValue("result.input.payload.hits.total"), equalTo((Object) 1));
}
}
@ -220,12 +220,9 @@ public class HttpInputTests extends ESTestCase {
.endObject();
XContentParser parser = createParser(builder);
parser.nextToken();
try {
httpParser.parseInput("_id", parser);
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), is("unsupported http method [_METHOD]"));
}

IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> httpParser.parseInput("_id", parser));
assertThat(e.getMessage(), is("unsupported http method [_METHOD]"));
}

public void testThatHeadersAreIncludedInPayload() throws Exception {
@ -0,0 +1,17 @@
{
"xpack.ml.delete_calendar": {
"methods": [ "DELETE" ],
"url": {
"path": "/_xpack/ml/calendars/{calendar_id}",
"paths": [ "/_xpack/ml/calendars/{calendar_id}" ],
"parts": {
"calendar_id": {
"type" : "string",
"required" : true,
"description" : "The ID of the calendar to delete"
}
}
},
"body": null
}
}

@ -0,0 +1,29 @@
{
"xpack.ml.get_calendars": {
"methods": [ "GET" ],
"url": {
"path": "/_xpack/ml/calendars/{calendar_id}",
"paths": [
"/_xpack/ml/calendars",
"/_xpack/ml/calendars/{calendar_id}"
],
"parts": {
"calendar_id": {
"type": "string",
"description": "The ID of the calendar to fetch"
}
},
"params": {
"from": {
"type": "int",
"description": "skips a number of calendars"
},
"size": {
"type": "int",
"description": "specifies a max number of calendars to get"
}
}
},
"body": null
}
}

@ -0,0 +1,20 @@
{
"xpack.ml.put_calendar": {
"methods": [ "PUT" ],
"url": {
"path": "/_xpack/ml/calendars/{calendar_id}",
"paths": [ "/_xpack/ml/calendars/{calendar_id}" ],
"parts": {
"calendar_id": {
"type": "string",
"required": true,
"description": "The ID of the calendar to create"
}
}
},
"body": {
"description" : "The calendar details",
"required" : false
}
}
}
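Taken together, the three specs above describe a small CRUD surface on /_xpack/ml/calendars/{calendar_id}: PUT creates a calendar (with an optional body), GET lists one or all calendars with from/size paging, and DELETE removes one. For orientation, a hedged curl sketch of the same calls; the localhost address and the elastic:changeme credentials are assumptions, not part of this change:

    # create a calendar holding two job ids (the body is optional per put_calendar.json)
    curl -u elastic:changeme -H 'Content-Type: application/json' \
      -XPUT 'http://localhost:9200/_xpack/ml/calendars/advent' \
      -d '{ "job_ids": ["abc", "xyz"] }'

    # list calendars, paging with the from/size params declared in get_calendars.json
    curl -u elastic:changeme 'http://localhost:9200/_xpack/ml/calendars?from=0&size=10'

    # fetch and delete a single calendar by id
    curl -u elastic:changeme 'http://localhost:9200/_xpack/ml/calendars/advent'
    curl -u elastic:changeme -XDELETE 'http://localhost:9200/_xpack/ml/calendars/advent'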
@ -0,0 +1,108 @@
---
"Test calendar CRUD":

- do:
xpack.ml.put_calendar:
calendar_id: "advent"
body: >
{
"job_ids": ["abc", "xyz"]
}
- match: { calendar_id: advent }
- match: { job_ids.0: abc }
- match: { job_ids.1: xyz }

- do:
xpack.ml.get_calendars:
calendar_id: "advent"
- match: { count: 1 }
- match:
calendars.0:
calendar_id: "advent"
job_ids: ["abc", "xyz"]
- is_false: type

- do:
xpack.ml.put_calendar:
calendar_id: "Dogs of the Year"
body: >
{
"job_ids": ["abc2"]
}

- do:
xpack.ml.put_calendar:
calendar_id: "Cats of the Year"

- do:
xpack.ml.get_calendars: {}
- match: { count: 3 }

- do:
xpack.ml.delete_calendar:
calendar_id: "Dogs of the Year"

- do:
xpack.ml.get_calendars: {}
- match: { count: 2 }

- do:
catch: missing
xpack.ml.get_calendars:
calendar_id: "Dogs of the Year"

---
"Test PageParams":
- do:
xpack.ml.put_calendar:
calendar_id: "Calendar1"
- do:
xpack.ml.put_calendar:
calendar_id: "Calendar2"
- do:
xpack.ml.put_calendar:
calendar_id: "Calendar3"

- do:
xpack.ml.get_calendars:
from: 2
- match: { count: 1 }
- match: { calendars.0.calendar_id: Calendar3 }

- do:
xpack.ml.get_calendars:
from: 1
size: 1
- match: { count: 1 }
- match: { calendars.0.calendar_id: Calendar2 }

---
"Test PageParams with ID is invalid":
- do:
catch: bad_request
xpack.ml.get_calendars:
calendar_id: Tides
size: 10

---
"Test cannot overwrite an existing calendar":

- do:
xpack.ml.put_calendar:
calendar_id: "Mayan"
body: >
{
"job_ids": ["apocalypse"]
}

- do:
catch: /version_conflict_engine_exception/
xpack.ml.put_calendar:
calendar_id: "Mayan"

---
"Test cannot create calendar with name _all":
- do:
catch: bad_request
xpack.ml.put_calendar:
calendar_id: "_all"
@ -202,3 +202,19 @@ setup:
catch: missing
xpack.ml.get_filters:
filter_id: "filter-foo"

---
"Test get all filter given no filter exists":

- do:
xpack.ml.delete_filter:
filter_id: "filter-foo"

- do:
xpack.ml.delete_filter:
filter_id: "filter-foo2"

- do:
xpack.ml.get_filters: {}
- match: { count: 0 }
- match: { filters: [] }

@ -1,20 +0,0 @@
---
"Test get all filter given no filter exists":

- do:
xpack.ml.put_filter:
filter_id: filter-foo
body: >
{
"filter_id": "filter-foo",
"items": ["abc", "xyz"]
}

- do:
xpack.ml.delete_filter:
filter_id: "filter-foo"

- do:
xpack.ml.get_filters: {}
- match: { count: 0 }
- match: { filters: [] }
@ -0,0 +1,58 @@
---
setup:
- do:
cluster.health:
wait_for_status: yellow

---
"HTTP input supports extracting of keys":

- do:
cluster.state: {}
- set: { metadata.cluster_uuid : cluster_uuid }
- set: { master_node: master }

- do:
nodes.info: {}
- set: { nodes.$master.http.publish_address: http_host }

- do:
xpack.watcher.execute_watch:
body: >
{
"watch" : {
"trigger": {
"schedule": {
"interval": "1s"
}
},
"input" : {
"http": {
"request": {
"url": "http://${http_host}/_cluster/health",
"auth" : {
"basic" : {
"username" : "x_pack_rest_user",
"password" : "x-pack-test-password"
}
}
},
"extract": [ "timed_out", "cluster_name" ]
}
},
"actions": {
"log": {
"logging": {
"text": "executed at {{ctx.execution_time}}"
}
}
}
}
}

- match: { watch_record.result.input.payload.timed_out: false }
- match: { watch_record.result.input.payload._status_code: 200 }
- is_true: watch_record.result.input.payload._headers
- is_true: watch_record.result.input.payload.cluster_name
# not part of the extract keys, should not occur
- is_false: watch_record.result.input.payload.status
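The REST test above exercises the new extract support through the test client; the same request is an inline watch execution against the Watcher execute API. A hedged curl sketch, with the cluster address and the x_pack_rest_user credentials mirroring the test setup rather than anything prescribed by this change:

    curl -u x_pack_rest_user:x-pack-test-password -H 'Content-Type: application/json' \
      -XPOST 'http://localhost:9200/_xpack/watcher/watch/_execute' -d '{
      "watch": {
        "trigger": { "schedule": { "interval": "1s" } },
        "input": {
          "http": {
            "request": { "url": "http://localhost:9200/_cluster/health" },
            "extract": [ "timed_out", "cluster_name" ]
          }
        },
        "actions": {
          "log": { "logging": { "text": "executed at {{ctx.execution_time}}" } }
        }
      }
    }'

Only timed_out and cluster_name should survive into watch_record.result.input.payload, plus the _status_code and _headers metadata the HTTP input always adds.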
@ -17,6 +17,8 @@ integTestRunner {
systemProperty 'tests.rest.blacklist', [
// Remove tests that are expected to throw an exception, because we cannot then
// know whether to expect an authorization exception or a validation exception
'ml/calendar_crud/Test cannot create calendar with name _all',
'ml/calendar_crud/Test PageParams with ID is invalid',
'ml/custom_all_field/Test querying custom all field',
'ml/datafeeds_crud/Test delete datafeed with missing id',
'ml/datafeeds_crud/Test put datafeed referring to missing job_id',
@ -7,7 +7,7 @@ minimal:
# Give all users involved in these tests access to the indices where the data to
# be analyzed is stored, because the ML roles alone do not provide access to
# non-ML indices
- names: [ 'airline-data', 'index-foo', 'unavailable-data' ]
- names: [ 'airline-data', 'index-*', 'unavailable-data', 'utopia' ]
privileges:
- indices:admin/create
- indices:admin/refresh
@ -32,6 +32,8 @@ integTestCluster {
extraConfigFile 'x-pack/roles.yml', 'roles.yml'
setupCommand 'setupTestAdminUser',
'bin/x-pack/users', 'useradd', 'test_admin', '-p', 'x-pack-test-password', '-r', 'superuser'
setupCommand 'setupXpackUserForTests',
'bin/x-pack/users', 'useradd', 'x_pack_rest_user', '-p', 'x-pack-test-password', '-r', 'watcher_manager'
setupCommand 'setupWatcherManagerUser',
'bin/x-pack/users', 'useradd', 'watcher_manager', '-p', 'x-pack-test-password', '-r', 'watcher_manager'
setupCommand 'setupPowerlessUser',
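Each setupCommand above shells out to the x-pack users tool, so the same users can be created by hand on a node that has x-pack installed. A hedged sketch, run from the Elasticsearch home directory, with paths and passwords taken from the build file above:

    bin/x-pack/users useradd test_admin -p x-pack-test-password -r superuser
    bin/x-pack/users useradd x_pack_rest_user -p x-pack-test-password -r watcher_manager
    bin/x-pack/users useradd watcher_manager -p x-pack-test-password -r watcher_manager
    bin/x-pack/users list   # confirm the users and their roles were written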
@ -33,7 +33,7 @@ setup() {
count=$(find . -type f -name 'x-pack*.zip' | wc -l)
[ "$count" -eq 1 ]

install_and_check_plugin x pack x-pack-*.jar
install_xpack
}

@test "[X-PACK] verify x-pack installation" {

@ -13,7 +13,7 @@ setup() {
clean_before_test
install

install_and_check_plugin x pack x-pack-*.jar
install_xpack
verify_xpack_installation
fi
}
@ -38,7 +38,7 @@ if [[ "$BATS_TEST_FILENAME" =~ 60_tar_certgen.bats$ ]]; then
DATA_USER=$DEFAULT_PACKAGE_USER
DATA_HOME=$DEFAULT_PACKAGE_ESHOME
DATA_UTILS=$DEFAULT_PACKAGE_UTILS

install_master_node() {
install_node_using_archive
}
@ -65,7 +65,7 @@ else
DATA_USER=$DEFAULT_ARCHIVE_USER
DATA_HOME=$DEFAULT_ARCHIVE_ESHOME
DATA_UTILS=$DEFAULT_ARCHIVE_UTILS

install_master_node() {
install_node_using_package
}
@ -85,12 +85,12 @@ install_node_using_archive() {
load $BATS_UTILS/tar.bash
export ESHOME="$DEFAULT_ARCHIVE_ESHOME"
export_elasticsearch_paths

install_archive
verify_archive_installation

export ESPLUGIN_COMMAND_USER=$DEFAULT_ARCHIVE_USER
install_and_check_plugin x pack x-pack-*.jar
install_xpack
verify_xpack_installation
}

@ -99,7 +99,7 @@ start_node_using_archive() {
load $BATS_UTILS/tar.bash
export ESHOME="$DEFAULT_ARCHIVE_ESHOME"
export_elasticsearch_paths

run sudo -u $DEFAULT_ARCHIVE_USER "$ESHOME/bin/elasticsearch" -d -p $ESHOME/elasticsearch.pid
[ "$status" -eq "0" ] || {
echo "Failed to start node using archive: $output"
@ -112,12 +112,12 @@ install_node_using_package() {
load $BATS_UTILS/packages.bash
export ESHOME="$DEFAULT_PACKAGE_ESHOME"
export_elasticsearch_paths

install_package
verify_package_installation

export ESPLUGIN_COMMAND_USER=$DEFAULT_PACKAGE_USER
install_and_check_plugin x pack x-pack-*.jar
install_xpack
verify_xpack_installation
}

@ -126,7 +126,7 @@ start_node_using_package() {
if is_systemd; then
run systemctl daemon-reload
[ "$status" -eq 0 ]

run sudo systemctl start elasticsearch.service
[ "$status" -eq "0" ]

@ -168,7 +168,7 @@ instances:
ip:
- "127.0.0.1"
- name: "node-data"
ip:
ip:
- "127.0.0.1"
EOF
CREATE_INSTANCES_FILE
@ -199,12 +199,12 @@ CREATE_INSTANCES_FILE
load $MASTER_UTILS
export ESHOME="$MASTER_HOME"
export_elasticsearch_paths

certs="$ESCONFIG/x-pack/certs"
if [[ -d "$certs" ]]; then
sudo rm -rf "$certs"
fi

run sudo -E -u $MASTER_USER "unzip" $certificates -d $certs
[ "$status" -eq 0 ] || {
echo "Failed to unzip certificates in $certs: $output"
@ -213,11 +213,11 @@ CREATE_INSTANCES_FILE

assert_file "$certs/ca/ca.key" f $MASTER_USER $MASTER_USER 644
assert_file "$certs/ca/ca.crt" f $MASTER_USER $MASTER_USER 644

assert_file "$certs/node-master" d $MASTER_USER $MASTER_USER 755
assert_file "$certs/node-master/node-master.key" f $MASTER_USER $MASTER_USER 644
assert_file "$certs/node-master/node-master.crt" f $MASTER_USER $MASTER_USER 644

assert_file "$certs/node-data" d $MASTER_USER $MASTER_USER 755
assert_file "$certs/node-data/node-data.key" f $MASTER_USER $MASTER_USER 644
assert_file "$certs/node-data/node-data.crt" f $MASTER_USER $MASTER_USER 644
@ -235,8 +235,8 @@ node.master: true
node.data: false
discovery.zen.ping.unicast.hosts: ["127.0.0.1:9301"]

xpack.ssl.key: $ESCONFIG/x-pack/certs/node-master/node-master.key
xpack.ssl.certificate: $ESCONFIG/x-pack/certs/node-master/node-master.crt
xpack.ssl.key: $ESCONFIG/x-pack/certs/node-master/node-master.key
xpack.ssl.certificate: $ESCONFIG/x-pack/certs/node-master/node-master.crt
xpack.ssl.certificate_authorities: ["$ESCONFIG/x-pack/certs/ca/ca.crt"]

xpack.security.transport.ssl.enabled: true
@ -274,7 +274,7 @@ MASTER_SETTINGS
load $DATA_UTILS
export ESHOME="$DATA_HOME"
export_elasticsearch_paths

sudo chown $DATA_USER:$DATA_USER "$certificates"
[ -f "$certificates" ] || {
echo "Could not find certificates: $certificates"
@ -285,7 +285,7 @@ MASTER_SETTINGS
if [[ -d "$certs" ]]; then
sudo rm -rf "$certs"
fi

run sudo -E -u $DATA_USER "unzip" $certificates -d $certs
[ "$status" -eq 0 ] || {
echo "Failed to unzip certificates in $certs: $output"
@ -295,11 +295,11 @@ MASTER_SETTINGS
assert_file "$certs/ca" d $DATA_USER $DATA_USER
assert_file "$certs/ca/ca.key" f $DATA_USER $DATA_USER 644
assert_file "$certs/ca/ca.crt" f $DATA_USER $DATA_USER 644

assert_file "$certs/node-master" d $DATA_USER $DATA_USER
assert_file "$certs/node-master/node-master.key" f $DATA_USER $DATA_USER 644
assert_file "$certs/node-master/node-master.crt" f $DATA_USER $DATA_USER 644

assert_file "$certs/node-data" d $DATA_USER $DATA_USER
assert_file "$certs/node-data/node-data.key" f $DATA_USER $DATA_USER 644
assert_file "$certs/node-data/node-data.crt" f $DATA_USER $DATA_USER 644
@ -317,8 +317,8 @@ node.master: false
node.data: true
discovery.zen.ping.unicast.hosts: ["127.0.0.1:9300"]

xpack.ssl.key: $ESCONFIG/x-pack/certs/node-data/node-data.key
xpack.ssl.certificate: $ESCONFIG/x-pack/certs/node-data/node-data.crt
xpack.ssl.key: $ESCONFIG/x-pack/certs/node-data/node-data.key
xpack.ssl.certificate: $ESCONFIG/x-pack/certs/node-data/node-data.crt
xpack.ssl.certificate_authorities: ["$ESCONFIG/x-pack/certs/ca/ca.crt"]

xpack.security.transport.ssl.enabled: true
@ -370,11 +370,11 @@ DATA_SETTINGS
echo "$masterSettings" | grep '"http":{"type":"security4"}'
echo "$masterSettings" | grep '"transport":{"ssl":{"enabled":"true"}'
echo "$masterSettings" | grep '"transport":{"type":"security4"}'

load $DATA_UTILS
export ESHOME="$DATA_HOME"
export_elasticsearch_paths

dataSettings=$(curl -u "elastic:changeme" \
-H "Content-Type: application/json" \
--cacert "$ESCONFIG/x-pack/certs/ca/ca.crt" \
@ -384,14 +384,12 @@ DATA_SETTINGS
echo "$dataSettings" | grep '"http":{"type":"security4"}'
echo "$dataSettings" | grep '"transport":{"ssl":{"enabled":"true"}'
echo "$dataSettings" | grep '"transport":{"type":"security4"}'

testSearch=$(curl -u "elastic:changeme" \
-H "Content-Type: application/json" \
--cacert "$ESCONFIG/x-pack/certs/ca/ca.crt" \
-XGET "https://127.0.0.1:9200/_search?q=title:guide")

echo "$testSearch" | grep '"_index":"books"'
echo "$testSearch" | grep '"_id":"0"'
}
@ -45,6 +45,6 @@ fi
}

@test "[$GROUP] keystore exists after install" {
install_and_check_plugin x pack x-pack-*.jar
install_xpack
verify_xpack_installation
}

@ -13,7 +13,7 @@ setup() {
clean_before_test
install

install_and_check_plugin x pack x-pack-*.jar
install_xpack
verify_xpack_installation
fi
}
@ -81,4 +81,3 @@ SETUP_AUTO

stop_elasticsearch_service
}

@ -4,6 +4,12 @@
# or more contributor license agreements. Licensed under the Elastic License;
# you may not use this file except in compliance with the Elastic License.

install_xpack() {
install_and_check_plugin x pack x-pack-core-*.jar x-pack-graph-*.jar x-pack-ml-*.jar \
x-pack-monitoring-*.jar x-pack-security-*.jar x-pack-watcher-*.jar
}

# Checks that X-Pack files are correctly installed
verify_xpack_installation() {
local user="$ESPLUGIN_COMMAND_USER"
local group="$ESPLUGIN_COMMAND_USER"
@ -47,4 +53,4 @@ wait_for_xpack() {
for i in {1..30}; do
echo "GET / HTTP/1.0" > /dev/tcp/$host/$port && break || sleep 1;
done
}
}
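With install_xpack defined in the shared helper above, the bats suites no longer install a single x-pack jar; the helper installs and checks each bundled module (core, graph, ml, monitoring, security, watcher). A hedged sketch of a minimal test using it; the tar.bash load and the archive variables are taken from the certgen file above, while the xpack.bash file name is an assumption:

    #!/usr/bin/env bats
    load $BATS_UTILS/tar.bash
    load $BATS_UTILS/xpack.bash

    @test "[X-PACK] install archive and verify bundled modules" {
        export ESHOME="$DEFAULT_ARCHIVE_ESHOME"
        export_elasticsearch_paths
        install_archive
        verify_archive_installation

        export ESPLUGIN_COMMAND_USER=$DEFAULT_ARCHIVE_USER
        install_xpack
        verify_xpack_installation
    }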