Merge branch 'master' into zen2

David Turner 2018-09-14 14:28:28 +02:00
commit 31e8781eaa
391 changed files with 14214 additions and 4133 deletions

View File

@@ -831,6 +831,9 @@ class BuildPlugin implements Plugin<Project> {
// TODO: remove this once ctx isn't added to update script params in 7.0
systemProperty 'es.scripting.update.ctx_in_params', 'false'
// TODO: remove this once the cname is prepended to the address by default in 7.0
systemProperty 'es.http.cname_in_publish_address', 'true'
// Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM
if (project.inFipsJvm) {
systemProperty 'javax.net.ssl.trustStorePassword', 'password'

View File

@@ -16,7 +16,7 @@ slf4j = 1.6.2
jna = 4.5.1
# test dependencies
randomizedrunner = 2.5.2
randomizedrunner = 2.7.0
junit = 4.12
httpclient = 4.5.2
# When updating httpcore, please also update server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy

View File

@@ -28,10 +28,13 @@ import org.apache.http.entity.ByteArrayEntity;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.client.RequestConverters.EndpointBuilder;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetJobRequest;
import org.elasticsearch.client.ml.GetJobStatsRequest;
@@ -39,6 +42,7 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest;
import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.common.Strings;
@@ -180,6 +184,51 @@ final class MLRequestConverters {
return request;
}
static Request putDatafeed(PutDatafeedRequest putDatafeedRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("ml")
.addPathPartAsIs("datafeeds")
.addPathPart(putDatafeedRequest.getDatafeed().getId())
.build();
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
request.setEntity(createEntity(putDatafeedRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}
static Request deleteDatafeed(DeleteDatafeedRequest deleteDatafeedRequest) {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("ml")
.addPathPartAsIs("datafeeds")
.addPathPart(deleteDatafeedRequest.getDatafeedId())
.build();
Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
RequestConverters.Params params = new RequestConverters.Params(request);
params.putParam("force", Boolean.toString(deleteDatafeedRequest.isForce()));
return request;
}
static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("ml")
.addPathPartAsIs("anomaly_detectors")
.addPathPart(deleteForecastRequest.getJobId())
.addPathPartAsIs("_forecast")
.addPathPart(Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds()))
.build();
Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
RequestConverters.Params params = new RequestConverters.Params(request);
if (deleteForecastRequest.isAllowNoForecasts() != null) {
params.putParam("allow_no_forecasts", Boolean.toString(deleteForecastRequest.isAllowNoForecasts()));
}
if (deleteForecastRequest.timeout() != null) {
params.putParam("timeout", deleteForecastRequest.timeout().getStringRep());
}
return request;
}
static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
@@ -194,6 +243,20 @@ final class MLRequestConverters {
return request;
}
static Request getCategories(GetCategoriesRequest getCategoriesRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")
.addPathPartAsIs("ml")
.addPathPartAsIs("anomaly_detectors")
.addPathPart(getCategoriesRequest.getJobId())
.addPathPartAsIs("results")
.addPathPartAsIs("categories")
.build();
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
request.setEntity(createEntity(getCategoriesRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}
static Request getOverallBuckets(GetOverallBucketsRequest getOverallBucketsRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_xpack")

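For orientation, a hedged sketch of what one of these converters produces (job and forecast IDs are hypothetical; MLRequestConverters is package-private, so this is illustrative rather than a public entry point):

DeleteForecastRequest deleteRequest = new DeleteForecastRequest("my-job");
deleteRequest.setForecastIds("forecast-1", "forecast-2");
deleteRequest.timeout("30s");

Request httpRequest = MLRequestConverters.deleteForecast(deleteRequest);
// httpRequest.getMethod()   => "DELETE"
// httpRequest.getEndpoint() => "/_xpack/ml/anomaly_detectors/my-job/_forecast/forecast-1,forecast-2"
// httpRequest.getParameters() contains timeout=30s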
View File

@@ -19,19 +19,20 @@
package org.elasticsearch.client;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.ForecastJobResponse;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PostDataResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.CloseJobResponse;
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.DeleteJobResponse;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.FlushJobResponse;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.ForecastJobResponse;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetBucketsResponse;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetCategoriesResponse;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetInfluencersResponse;
import org.elasticsearch.client.ml.GetJobRequest;
@@ -44,13 +45,19 @@ import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.GetRecordsResponse;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.OpenJobResponse;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PostDataResponse;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.PutJobResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.job.stats.JobStats;
import java.io.IOException;
import java.util.Collections;
/**
* Machine Learning API client wrapper for the {@link RestHighLevelClient}
*
@@ -197,11 +204,11 @@ public final class MachineLearningClient {
* @return action acknowledgement
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public DeleteJobResponse deleteJob(DeleteJobRequest request, RequestOptions options) throws IOException {
public AcknowledgedResponse deleteJob(DeleteJobRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::deleteJob,
options,
DeleteJobResponse::fromXContent,
AcknowledgedResponse::fromXContent,
Collections.emptySet());
}
@@ -215,11 +222,11 @@ public final class MachineLearningClient {
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, ActionListener<DeleteJobResponse> listener) {
public void deleteJobAsync(DeleteJobRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::deleteJob,
options,
DeleteJobResponse::fromXContent,
AcknowledgedResponse::fromXContent,
listener,
Collections.emptySet());
}
@@ -387,6 +394,11 @@ public final class MachineLearningClient {
/**
* Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job}
*
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html">ML Update Job documentation</a>
* </p>
*
* @param request the {@link UpdateJobRequest} object enclosing the desired updates
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return a PutJobResponse object containing the updated job object
@@ -425,6 +437,10 @@ public final class MachineLearningClient {
/**
* Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} asynchronously
*
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html">ML Update Job documentation</a>
* </p>
* @param request the {@link UpdateJobRequest} object enclosing the desired updates
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
@@ -438,6 +454,126 @@ public final class MachineLearningClient {
Collections.emptySet());
}
/**
* Creates a new Machine Learning Datafeed
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html">ML PUT datafeed documentation</a>
*
* @param request The PutDatafeedRequest containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return PutDatafeedResponse with enclosed {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} object
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public PutDatafeedResponse putDatafeed(PutDatafeedRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::putDatafeed,
options,
PutDatafeedResponse::fromXContent,
Collections.emptySet());
}
/**
* Creates a new Machine Learning Datafeed asynchronously and notifies the listener on completion
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html">ML PUT datafeed documentation</a>
*
* @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
public void putDatafeedAsync(PutDatafeedRequest request, RequestOptions options, ActionListener<PutDatafeedResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::putDatafeed,
options,
PutDatafeedResponse::fromXContent,
listener,
Collections.emptySet());
}
/**
* Deletes the given Machine Learning Datafeed
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html">
* ML Delete Datafeed documentation</a>
* </p>
* @param request The request to delete the datafeed
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return action acknowledgement
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public AcknowledgedResponse deleteDatafeed(DeleteDatafeedRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::deleteDatafeed,
options,
AcknowledgedResponse::fromXContent,
Collections.emptySet());
}
/**
* Deletes the given Machine Learning Datafeed asynchronously and notifies the listener on completion
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-datafeed.html">
* ML Delete Datafeed documentation</a>
* </p>
* @param request The request to delete the datafeed
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
public void deleteDatafeedAsync(DeleteDatafeedRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::deleteDatafeed,
options,
AcknowledgedResponse::fromXContent,
listener,
Collections.emptySet());
}
/**
* Deletes Machine Learning Job Forecasts
*
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html">ML Delete Forecast documentation</a>
* </p>
*
* @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecast IDs, and other options
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return an AcknowledgedResponse object indicating request success
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public AcknowledgedResponse deleteForecast(DeleteForecastRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::deleteForecast,
options,
AcknowledgedResponse::fromXContent,
Collections.emptySet());
}
/**
* Deletes Machine Learning Job Forecasts asynchronously
*
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html">ML Delete Forecast documentation</a>
* </p>
*
* @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecast IDs, and other options
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
public void deleteForecastAsync(DeleteForecastRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::deleteForecast,
options,
AcknowledgedResponse::fromXContent,
listener,
Collections.emptySet());
}
/**
* Gets the buckets for a Machine Learning Job.
* <p>
@@ -474,6 +610,45 @@ public final class MachineLearningClient {
Collections.emptySet());
}
/**
* Gets the categories for a Machine Learning Job.
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html">
* ML GET categories documentation</a>
*
* @param request The request
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return {@link GetCategoriesResponse} object containing the requested categories
* @throws IOException when there is a serialization issue sending the request or receiving the response
*/
public GetCategoriesResponse getCategories(GetCategoriesRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request,
MLRequestConverters::getCategories,
options,
GetCategoriesResponse::fromXContent,
Collections.emptySet());
}
/**
* Gets the categories for a Machine Learning Job, notifies listener once the requested categories are retrieved.
* <p>
* For additional info
* see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html">
* ML GET categories documentation</a>
*
* @param request The request
* @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener Listener to be notified upon request completion
*/
public void getCategoriesAsync(GetCategoriesRequest request, RequestOptions options, ActionListener<GetCategoriesResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity(request,
MLRequestConverters::getCategories,
options,
GetCategoriesResponse::fromXContent,
listener,
Collections.emptySet());
}
/**
* Gets overall buckets for a set of Machine Learning Jobs.
* <p>

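Putting the new endpoints together, a minimal sketch of creating and deleting a datafeed through this client (assumes an existing RestHighLevelClient named client and a job with ID "my-job"; all IDs and index names are hypothetical):

DatafeedConfig datafeed = DatafeedConfig.builder("my-datafeed", "my-job")
    .setIndices("my-index")
    .build();

PutDatafeedResponse putResponse = client.machineLearning()
    .putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);

AcknowledgedResponse deleteResponse = client.machineLearning()
    .deleteDatafeed(new DeleteDatafeedRequest("my-datafeed"), RequestOptions.DEFAULT);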
View File

@@ -0,0 +1,80 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import java.util.Objects;
/**
* Request to delete a Machine Learning Datafeed via its ID
*/
public class DeleteDatafeedRequest extends ActionRequest {
private String datafeedId;
private boolean force;
public DeleteDatafeedRequest(String datafeedId) {
this.datafeedId = Objects.requireNonNull(datafeedId, "[datafeed_id] must not be null");
}
public String getDatafeedId() {
return datafeedId;
}
public boolean isForce() {
return force;
}
/**
* Used to forcefully delete a started datafeed.
* This is quicker than first stopping the datafeed and then deleting it.
*
* @param force When {@code true} forcefully delete a started datafeed. Defaults to {@code false}
*/
public void setForce(boolean force) {
this.force = force;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public int hashCode() {
return Objects.hash(datafeedId, force);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || obj.getClass() != getClass()) {
return false;
}
DeleteDatafeedRequest other = (DeleteDatafeedRequest) obj;
return Objects.equals(datafeedId, other.datafeedId) && Objects.equals(force, other.force);
}
}
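For example (reusing the hypothetical client and datafeed ID from above), a started datafeed can be removed in a single step by forcing the delete:

DeleteDatafeedRequest request = new DeleteDatafeedRequest("my-datafeed");
request.setForce(true); // delete even though the datafeed is started

AcknowledgedResponse response = client.machineLearning().deleteDatafeed(request, RequestOptions.DEFAULT);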

View File

@@ -0,0 +1,183 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
/**
* POJO for a delete forecast request
*/
public class DeleteForecastRequest extends ActionRequest implements ToXContentObject {
public static final ParseField FORECAST_ID = new ParseField("forecast_id");
public static final ParseField ALLOW_NO_FORECASTS = new ParseField("allow_no_forecasts");
public static final ParseField TIMEOUT = new ParseField("timeout");
public static final String ALL = "_all";
public static final ConstructingObjectParser<DeleteForecastRequest, Void> PARSER =
new ConstructingObjectParser<>("delete_forecast_request", (a) -> new DeleteForecastRequest((String) a[0]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
PARSER.declareStringOrNull(
(c, p) -> c.setForecastIds(Strings.commaDelimitedListToStringArray(p)), FORECAST_ID);
PARSER.declareBoolean(DeleteForecastRequest::setAllowNoForecasts, ALLOW_NO_FORECASTS);
PARSER.declareString(DeleteForecastRequest::timeout, TIMEOUT);
}
/**
* Create a new {@link DeleteForecastRequest} that explicitly deletes all forecasts
*
* @param jobId the jobId of the Job whose forecasts to delete
*/
public static DeleteForecastRequest deleteAllForecasts(String jobId) {
DeleteForecastRequest request = new DeleteForecastRequest(jobId);
request.setForecastIds(ALL);
return request;
}
private final String jobId;
private List<String> forecastIds = new ArrayList<>();
private Boolean allowNoForecasts;
private TimeValue timeout;
/**
* Create a new DeleteForecastRequest for the given Job ID
*
* @param jobId the jobId of the Job whose forecast(s) to delete
*/
public DeleteForecastRequest(String jobId) {
this.jobId = Objects.requireNonNull(jobId, Job.ID.getPreferredName());
}
public String getJobId() {
return jobId;
}
public List<String> getForecastIds() {
return forecastIds;
}
/**
* The forecast IDs to delete. Can also be {@link DeleteForecastRequest#ALL} to explicitly delete ALL forecasts
*
* @param forecastIds forecast IDs to delete
*/
public void setForecastIds(String... forecastIds) {
setForecastIds(Arrays.asList(forecastIds));
}
void setForecastIds(List<String> forecastIds) {
if (forecastIds.stream().anyMatch(Objects::isNull)) {
throw new NullPointerException("forecastIds must not contain null values");
}
this.forecastIds = new ArrayList<>(forecastIds);
}
public Boolean isAllowNoForecasts() {
return allowNoForecasts;
}
/**
* Sets the `allow_no_forecasts` field.
*
* @param allowNoForecasts when {@code true} no error is thrown when {@link DeleteForecastRequest#ALL} does not find any forecasts
*/
public void setAllowNoForecasts(boolean allowNoForecasts) {
this.allowNoForecasts = allowNoForecasts;
}
/**
* Sets the timeout
* @param timeout timeout as a string (e.g. 1s)
*/
public void timeout(String timeout) {
this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout");
}
/**
* Sets the timeout
* @param timeout timeout as a {@link TimeValue}
*/
public void timeout(TimeValue timeout) {
this.timeout = timeout;
}
public TimeValue timeout() {
return timeout;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
DeleteForecastRequest that = (DeleteForecastRequest) other;
return Objects.equals(jobId, that.jobId) &&
Objects.equals(forecastIds, that.forecastIds) &&
Objects.equals(allowNoForecasts, that.allowNoForecasts) &&
Objects.equals(timeout, that.timeout);
}
@Override
public int hashCode() {
return Objects.hash(jobId, forecastIds, allowNoForecasts, timeout);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (forecastIds != null) {
builder.field(FORECAST_ID.getPreferredName(), Strings.collectionToCommaDelimitedString(forecastIds));
}
if (allowNoForecasts != null) {
builder.field(ALLOW_NO_FORECASTS.getPreferredName(), allowNoForecasts);
}
if (timeout != null) {
builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep());
}
builder.endObject();
return builder;
}
}
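A short sketch of the two ways this request can target forecasts (job and forecast IDs hypothetical):

// Delete specific forecasts by ID.
DeleteForecastRequest byId = new DeleteForecastRequest("my-job");
byId.setForecastIds("forecast-1", "forecast-2");

// Or explicitly delete all forecasts, tolerating the case where none exist.
DeleteForecastRequest all = DeleteForecastRequest.deleteAllForecasts("my-job");
all.setAllowNoForecasts(true);
all.timeout("30s");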

View File

@@ -1,63 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
/**
* Response acknowledging the Machine Learning Job request
*/
public class DeleteJobResponse extends AcknowledgedResponse {
public DeleteJobResponse(boolean acknowledged) {
super(acknowledged);
}
public DeleteJobResponse() {
}
public static DeleteJobResponse fromXContent(XContentParser parser) throws IOException {
AcknowledgedResponse response = AcknowledgedResponse.fromXContent(parser);
return new DeleteJobResponse(response.isAcknowledged());
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
DeleteJobResponse that = (DeleteJobResponse) other;
return isAcknowledged() == that.isAcknowledged();
}
@Override
public int hashCode() {
return Objects.hash(isAcknowledged());
}
}

View File

@@ -0,0 +1,128 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
/**
* A request to retrieve categories of a given job
*/
public class GetCategoriesRequest extends ActionRequest implements ToXContentObject {
public static final ParseField CATEGORY_ID = new ParseField("category_id");
public static final ConstructingObjectParser<GetCategoriesRequest, Void> PARSER = new ConstructingObjectParser<>(
"get_categories_request", a -> new GetCategoriesRequest((String) a[0]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
PARSER.declareLong(GetCategoriesRequest::setCategoryId, CATEGORY_ID);
PARSER.declareObject(GetCategoriesRequest::setPageParams, PageParams.PARSER, PageParams.PAGE);
}
private final String jobId;
private Long categoryId;
private PageParams pageParams;
/**
* Constructs a request to retrieve category information from a given job
* @param jobId id of the job from which to retrieve results
*/
public GetCategoriesRequest(String jobId) {
this.jobId = Objects.requireNonNull(jobId);
}
public String getJobId() {
return jobId;
}
public PageParams getPageParams() {
return pageParams;
}
public Long getCategoryId() {
return categoryId;
}
/**
* Sets the category id
* @param categoryId the category id
*/
public void setCategoryId(Long categoryId) {
this.categoryId = categoryId;
}
/**
* Sets the paging parameters
* @param pageParams the paging parameters
*/
public void setPageParams(PageParams pageParams) {
this.pageParams = pageParams;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (categoryId != null) {
builder.field(CATEGORY_ID.getPreferredName(), categoryId);
}
if (pageParams != null) {
builder.field(PageParams.PAGE.getPreferredName(), pageParams);
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
GetCategoriesRequest request = (GetCategoriesRequest) obj;
return Objects.equals(jobId, request.jobId)
&& Objects.equals(categoryId, request.categoryId)
&& Objects.equals(pageParams, request.pageParams);
}
@Override
public int hashCode() {
return Objects.hash(jobId, categoryId, pageParams);
}
}
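For example (job ID hypothetical, and assuming PageParams exposes the usual (from, size) constructor used elsewhere in this client):

GetCategoriesRequest request = new GetCategoriesRequest("my-job");
request.setPageParams(new PageParams(0, 20)); // from = 0, size = 20
// alternatively, target a single category: request.setCategoryId(1L);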

View File

@@ -0,0 +1,79 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.results.CategoryDefinition;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
/**
* A response containing the requested categories
*/
public class GetCategoriesResponse extends AbstractResultResponse<CategoryDefinition> {
public static final ParseField CATEGORIES = new ParseField("categories");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<GetCategoriesResponse, Void> PARSER =
new ConstructingObjectParser<>("get_categories_response", true,
a -> new GetCategoriesResponse((List<CategoryDefinition>) a[0], (long) a[1]));
static {
PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), CategoryDefinition.PARSER, CATEGORIES);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT);
}
public static GetCategoriesResponse fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
GetCategoriesResponse(List<CategoryDefinition> categories, long count) {
super(CATEGORIES, categories, count);
}
/**
* The retrieved categories
* @return the retrieved categories
*/
public List<CategoryDefinition> categories() {
return results;
}
@Override
public int hashCode() {
return Objects.hash(count, results);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
GetCategoriesResponse other = (GetCategoriesResponse) obj;
return count == other.count && Objects.equals(results, other.results);
}
}
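And a sketch of consuming the response (count() is assumed to be inherited from AbstractResultResponse):

GetCategoriesResponse response = client.machineLearning()
    .getCategories(request, RequestOptions.DEFAULT);
long total = response.count();                         // total matching categories
List<CategoryDefinition> page = response.categories(); // the requested page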

View File

@@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
/**
* Request to create a new Machine Learning Datafeed given a {@link DatafeedConfig} configuration
*/
public class PutDatafeedRequest extends ActionRequest implements ToXContentObject {
private final DatafeedConfig datafeed;
/**
* Construct a new PutDatafeedRequest
*
* @param datafeed a {@link DatafeedConfig} configuration to create
*/
public PutDatafeedRequest(DatafeedConfig datafeed) {
this.datafeed = datafeed;
}
public DatafeedConfig getDatafeed() {
return datafeed;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return datafeed.toXContent(builder, params);
}
@Override
public boolean equals(Object object) {
if (this == object) {
return true;
}
if (object == null || getClass() != object.getClass()) {
return false;
}
PutDatafeedRequest request = (PutDatafeedRequest) object;
return Objects.equals(datafeed, request.datafeed);
}
@Override
public int hashCode() {
return Objects.hash(datafeed);
}
@Override
public final String toString() {
return Strings.toString(this);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
}

View File

@@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
/**
* Response containing the newly created {@link DatafeedConfig}
*/
public class PutDatafeedResponse implements ToXContentObject {
private DatafeedConfig datafeed;
public static PutDatafeedResponse fromXContent(XContentParser parser) throws IOException {
return new PutDatafeedResponse(DatafeedConfig.PARSER.parse(parser, null).build());
}
PutDatafeedResponse(DatafeedConfig datafeed) {
this.datafeed = datafeed;
}
public DatafeedConfig getResponse() {
return datafeed;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
datafeed.toXContent(builder, params);
return builder;
}
@Override
public boolean equals(Object object) {
if (this == object) {
return true;
}
if (object == null || getClass() != object.getClass()) {
return false;
}
PutDatafeedResponse response = (PutDatafeedResponse) object;
return Objects.equals(datafeed, response.datafeed);
}
@Override
public int hashCode() {
return Objects.hash(datafeed);
}
}
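Usage sketch (reusing the hypothetical datafeed from earlier): the response hands back the datafeed as the server stored it, with any defaults filled in:

PutDatafeedResponse response = client.machineLearning()
    .putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);
DatafeedConfig created = response.getResponse(); // server-side view of the new datafeed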

View File

@@ -20,36 +20,37 @@ package org.elasticsearch.client.ml.datafeed;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* Datafeed configuration options pojo. Describes where to proactively pull input
* data from.
* <p>
* If a value has not been set it will be <code>null</code>. Object wrappers are
* used around integral types and booleans so they can take <code>null</code>
* values.
* The datafeed configuration object. It specifies which indices
* to get the data from and offers parameters for customizing different
* aspects of the process.
*/
public class DatafeedConfig implements ToXContentObject {
public static final int DEFAULT_SCROLL_SIZE = 1000;
public static final ParseField ID = new ParseField("datafeed_id");
public static final ParseField QUERY_DELAY = new ParseField("query_delay");
public static final ParseField FREQUENCY = new ParseField("frequency");
@@ -59,7 +60,6 @@ public class DatafeedConfig implements ToXContentObject {
public static final ParseField QUERY = new ParseField("query");
public static final ParseField SCROLL_SIZE = new ParseField("scroll_size");
public static final ParseField AGGREGATIONS = new ParseField("aggregations");
public static final ParseField AGGS = new ParseField("aggs");
public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields");
public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config");
@@ -77,9 +77,8 @@ public class DatafeedConfig implements ToXContentObject {
builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
PARSER.declareString((builder, val) ->
builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
PARSER.declareField(Builder::setQuery, DatafeedConfig::parseBytes, QUERY, ObjectParser.ValueType.OBJECT);
PARSER.declareField(Builder::setAggregations, DatafeedConfig::parseBytes, AGGREGATIONS, ObjectParser.ValueType.OBJECT);
PARSER.declareObject(Builder::setScriptFields, (p, c) -> {
List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
while (p.nextToken() != XContentParser.Token.END_OBJECT) {
@@ -91,29 +90,26 @@ public class DatafeedConfig implements ToXContentObject {
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG);
}
private static BytesReference parseBytes(XContentParser parser) throws IOException {
XContentBuilder contentBuilder = JsonXContent.contentBuilder();
contentBuilder.generator().copyCurrentStructure(parser);
return BytesReference.bytes(contentBuilder);
}
private final String id;
private final String jobId;
/**
* The delay before starting to query a period of time
*/
private final TimeValue queryDelay;
/**
* The frequency with which queries are executed
*/
private final TimeValue frequency;
private final List<String> indices;
private final List<String> types;
private final QueryBuilder query;
private final AggregatorFactories.Builder aggregations;
private final BytesReference query;
private final BytesReference aggregations;
private final List<SearchSourceBuilder.ScriptField> scriptFields;
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
BytesReference query, BytesReference aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
Integer scrollSize, ChunkingConfig chunkingConfig) {
this.id = id;
this.jobId = jobId;
@@ -156,11 +152,11 @@ public class DatafeedConfig implements ToXContentObject {
return scrollSize;
}
public QueryBuilder getQuery() {
public BytesReference getQuery() {
return query;
}
public AggregatorFactories.Builder getAggregations() {
public BytesReference getAggregations() {
return aggregations;
}
@@ -183,11 +179,17 @@ public class DatafeedConfig implements ToXContentObject {
if (frequency != null) {
builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep());
}
builder.field(INDICES.getPreferredName(), indices);
builder.field(TYPES.getPreferredName(), types);
builder.field(QUERY.getPreferredName(), query);
if (indices != null) {
builder.field(INDICES.getPreferredName(), indices);
}
if (types != null) {
builder.field(TYPES.getPreferredName(), types);
}
if (query != null) {
builder.field(QUERY.getPreferredName(), asMap(query));
}
if (aggregations != null) {
builder.field(AGGREGATIONS.getPreferredName(), aggregations);
builder.field(AGGREGATIONS.getPreferredName(), asMap(aggregations));
}
if (scriptFields != null) {
builder.startObject(SCRIPT_FIELDS.getPreferredName());
@@ -196,7 +198,9 @@ public class DatafeedConfig implements ToXContentObject {
}
builder.endObject();
}
builder.field(SCROLL_SIZE.getPreferredName(), scrollSize);
if (scrollSize != null) {
builder.field(SCROLL_SIZE.getPreferredName(), scrollSize);
}
if (chunkingConfig != null) {
builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig);
}
@@ -205,10 +209,18 @@ public class DatafeedConfig implements ToXContentObject {
return builder;
}
private static Map<String, Object> asMap(BytesReference bytesReference) {
return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2();
}
/**
* The lists of indices and types are compared for equality but are not
* sorted first, so the check can fail simply because the two lists hold
* the same items in different orders.
*
* Also note this could be a heavy operation when a query or aggregations
* are set as we need to convert the bytes references into maps to correctly
* compare them.
*/
@Override
public boolean equals(Object other) {
@@ -228,31 +240,40 @@ public class DatafeedConfig implements ToXContentObject {
&& Objects.equals(this.queryDelay, that.queryDelay)
&& Objects.equals(this.indices, that.indices)
&& Objects.equals(this.types, that.types)
&& Objects.equals(this.query, that.query)
&& Objects.equals(asMap(this.query), asMap(that.query))
&& Objects.equals(this.scrollSize, that.scrollSize)
&& Objects.equals(this.aggregations, that.aggregations)
&& Objects.equals(asMap(this.aggregations), asMap(that.aggregations))
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig);
}
/**
* Note this could be a heavy operation when a query or aggregations
* are set as we need to convert the bytes references into maps to
* compute a stable hash code.
*/
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields,
return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields,
chunkingConfig);
}
public static Builder builder(String id, String jobId) {
return new Builder(id, jobId);
}
public static class Builder {
private String id;
private String jobId;
private TimeValue queryDelay;
private TimeValue frequency;
private List<String> indices = Collections.emptyList();
private List<String> types = Collections.emptyList();
private QueryBuilder query = QueryBuilders.matchAllQuery();
private AggregatorFactories.Builder aggregations;
private List<String> indices;
private List<String> types;
private BytesReference query;
private BytesReference aggregations;
private List<SearchSourceBuilder.ScriptField> scriptFields;
private Integer scrollSize = DEFAULT_SCROLL_SIZE;
private Integer scrollSize;
private ChunkingConfig chunkingConfig;
public Builder(String id, String jobId) {
@@ -279,8 +300,12 @@ public class DatafeedConfig implements ToXContentObject {
return this;
}
public Builder setIndices(String... indices) {
return setIndices(Arrays.asList(indices));
}
public Builder setTypes(List<String> types) {
this.types = Objects.requireNonNull(types, TYPES.getPreferredName());
this.types = types;
return this;
}
@@ -294,16 +319,36 @@ public class DatafeedConfig implements ToXContentObject {
return this;
}
public Builder setQuery(QueryBuilder query) {
this.query = Objects.requireNonNull(query, QUERY.getPreferredName());
private Builder setQuery(BytesReference query) {
this.query = query;
return this;
}
public Builder setAggregations(AggregatorFactories.Builder aggregations) {
public Builder setQuery(String queryAsJson) {
this.query = queryAsJson == null ? null : new BytesArray(queryAsJson);
return this;
}
public Builder setQuery(QueryBuilder query) throws IOException {
this.query = query == null ? null : xContentToBytes(query);
return this;
}
private Builder setAggregations(BytesReference aggregations) {
this.aggregations = aggregations;
return this;
}
public Builder setAggregations(String aggsAsJson) {
this.aggregations = aggsAsJson == null ? null : new BytesArray(aggsAsJson);
return this;
}
public Builder setAggregations(AggregatorFactories.Builder aggregations) throws IOException {
this.aggregations = aggregations == null ? null : xContentToBytes(aggregations);
return this;
}
public Builder setScriptFields(List<SearchSourceBuilder.ScriptField> scriptFields) {
List<SearchSourceBuilder.ScriptField> sorted = new ArrayList<>(scriptFields);
sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
@@ -325,5 +370,12 @@ public class DatafeedConfig implements ToXContentObject {
return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize,
chunkingConfig);
}
private static BytesReference xContentToBytes(ToXContentObject object) throws IOException {
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
object.toXContent(builder, ToXContentObject.EMPTY_PARAMS);
return BytesReference.bytes(builder);
}
}
}
}
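Because the query and aggregations are now opaque BytesReference payloads, the builder accepts raw JSON strings, decoupling client code from the server-side QueryBuilder and AggregatorFactories types. A sketch with hypothetical names:

DatafeedConfig datafeed = DatafeedConfig.builder("my-datafeed", "my-job")
    .setIndices("my-index")
    .setQuery("{\"term\": {\"airline\": \"AAL\"}}")
    .setAggregations("{\"buckets\": {\"date_histogram\": {\"field\": \"time\", \"interval\": \"1h\"}}}")
    .build();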

View File

@@ -20,12 +20,17 @@ package org.elasticsearch.client.ml.datafeed;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -35,6 +40,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
@@ -58,11 +64,9 @@ public class DatafeedUpdate implements ToXContentObject {
TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY);
PARSER.declareString((builder, val) -> builder.setFrequency(
TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY);
PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY);
PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p),
DatafeedConfig.AGGREGATIONS);
PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p),
DatafeedConfig.AGGS);
PARSER.declareField(Builder::setQuery, DatafeedUpdate::parseBytes, DatafeedConfig.QUERY, ObjectParser.ValueType.OBJECT);
PARSER.declareField(Builder::setAggregations, DatafeedUpdate::parseBytes, DatafeedConfig.AGGREGATIONS,
ObjectParser.ValueType.OBJECT);
PARSER.declareObject(Builder::setScriptFields, (p, c) -> {
List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
while (p.nextToken() != XContentParser.Token.END_OBJECT) {
@@ -74,20 +78,26 @@ public class DatafeedUpdate implements ToXContentObject {
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG);
}
private static BytesReference parseBytes(XContentParser parser) throws IOException {
XContentBuilder contentBuilder = JsonXContent.contentBuilder();
contentBuilder.generator().copyCurrentStructure(parser);
return BytesReference.bytes(contentBuilder);
}
private final String id;
private final String jobId;
private final TimeValue queryDelay;
private final TimeValue frequency;
private final List<String> indices;
private final List<String> types;
private final QueryBuilder query;
private final AggregatorFactories.Builder aggregations;
private final BytesReference query;
private final BytesReference aggregations;
private final List<SearchSourceBuilder.ScriptField> scriptFields;
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
BytesReference query, BytesReference aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
Integer scrollSize, ChunkingConfig chunkingConfig) {
this.id = id;
this.jobId = jobId;
@@ -121,9 +131,13 @@ public class DatafeedUpdate implements ToXContentObject {
builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep());
}
addOptionalField(builder, DatafeedConfig.INDICES, indices);
if (query != null) {
builder.field(DatafeedConfig.QUERY.getPreferredName(), asMap(query));
}
if (aggregations != null) {
builder.field(DatafeedConfig.AGGREGATIONS.getPreferredName(), asMap(aggregations));
}
addOptionalField(builder, DatafeedConfig.TYPES, types);
addOptionalField(builder, DatafeedConfig.QUERY, query);
addOptionalField(builder, DatafeedConfig.AGGREGATIONS, aggregations);
if (scriptFields != null) {
builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName());
for (SearchSourceBuilder.ScriptField scriptField : scriptFields) {
@@ -167,11 +181,11 @@ public class DatafeedUpdate implements ToXContentObject {
return scrollSize;
}
public QueryBuilder getQuery() {
public BytesReference getQuery() {
return query;
}
public AggregatorFactories.Builder getAggregations() {
public BytesReference getAggregations() {
return aggregations;
}
@@ -183,10 +197,18 @@ public class DatafeedUpdate implements ToXContentObject {
return chunkingConfig;
}
private static Map<String, Object> asMap(BytesReference bytesReference) {
return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2();
}
/**
* The lists of indices and types are compared for equality but are not
* sorted first, so the check can fail simply because the two lists hold
* the same items in different orders.
*
* Also note this could be a heavy operation when a query or aggregations
* are set as we need to convert the bytes references into maps to correctly
* compare them.
*/
@Override
public boolean equals(Object other) {
@@ -206,19 +228,28 @@ public class DatafeedUpdate implements ToXContentObject {
&& Objects.equals(this.queryDelay, that.queryDelay)
&& Objects.equals(this.indices, that.indices)
&& Objects.equals(this.types, that.types)
&& Objects.equals(this.query, that.query)
&& Objects.equals(asMap(this.query), asMap(that.query))
&& Objects.equals(this.scrollSize, that.scrollSize)
&& Objects.equals(this.aggregations, that.aggregations)
&& Objects.equals(asMap(this.aggregations), asMap(that.aggregations))
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig);
}
/**
* Note this could be a heavy operation when a query or aggregations
* are set as we need to convert the bytes references into maps to
* compute a stable hash code.
*/
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields,
return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields,
chunkingConfig);
}
public static Builder builder(String id) {
return new Builder(id);
}
public static class Builder {
private String id;
@@ -227,8 +258,8 @@ public class DatafeedUpdate implements ToXContentObject {
private TimeValue frequency;
private List<String> indices;
private List<String> types;
private QueryBuilder query;
private AggregatorFactories.Builder aggregations;
private BytesReference query;
private BytesReference aggregations;
private List<SearchSourceBuilder.ScriptField> scriptFields;
private Integer scrollSize;
private ChunkingConfig chunkingConfig;
@@ -276,16 +307,36 @@ public class DatafeedUpdate implements ToXContentObject {
return this;
}
public Builder setQuery(QueryBuilder query) {
private Builder setQuery(BytesReference query) {
this.query = query;
return this;
}
public Builder setAggregations(AggregatorFactories.Builder aggregations) {
public Builder setQuery(String queryAsJson) {
this.query = queryAsJson == null ? null : new BytesArray(queryAsJson);
return this;
}
public Builder setQuery(QueryBuilder query) throws IOException {
this.query = query == null ? null : xContentToBytes(query);
return this;
}
private Builder setAggregations(BytesReference aggregations) {
this.aggregations = aggregations;
return this;
}
public Builder setAggregations(String aggsAsJson) {
this.aggregations = aggsAsJson == null ? null : new BytesArray(aggsAsJson);
return this;
}
public Builder setAggregations(AggregatorFactories.Builder aggregations) throws IOException {
this.aggregations = aggregations == null ? null : xContentToBytes(aggregations);
return this;
}
public Builder setScriptFields(List<SearchSourceBuilder.ScriptField> scriptFields) {
List<SearchSourceBuilder.ScriptField> sorted = new ArrayList<>(scriptFields);
sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
@@ -307,5 +358,12 @@ public class DatafeedUpdate implements ToXContentObject {
return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize,
chunkingConfig);
}
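/**
 * Serializes the given object to its JSON representation. This is what lets
 * the query and aggregations setters above store opaque JSON bytes instead of
 * live builder objects.
 */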
private static BytesReference xContentToBytes(ToXContentObject object) throws IOException {
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
object.toXContent(builder, ToXContentObject.EMPTY_PARAMS);
return BytesReference.bytes(builder);
}
}
}
}

View File

@ -24,10 +24,13 @@ import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetJobRequest;
import org.elasticsearch.client.ml.GetJobStatsRequest;
@ -35,14 +38,18 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest;
import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.config.JobUpdateTests;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
@ -203,6 +210,61 @@ public class MLRequestConvertersTests extends ESTestCase {
}
}
public void testPutDatafeed() throws IOException {
DatafeedConfig datafeed = DatafeedConfigTests.createRandom();
PutDatafeedRequest putDatafeedRequest = new PutDatafeedRequest(datafeed);
Request request = MLRequestConverters.putDatafeed(putDatafeedRequest);
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_xpack/ml/datafeeds/" + datafeed.getId()));
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
DatafeedConfig parsedDatafeed = DatafeedConfig.PARSER.apply(parser, null).build();
assertThat(parsedDatafeed, equalTo(datafeed));
}
}
public void testDeleteDatafeed() {
String datafeedId = randomAlphaOfLength(10);
DeleteDatafeedRequest deleteDatafeedRequest = new DeleteDatafeedRequest(datafeedId);
Request request = MLRequestConverters.deleteDatafeed(deleteDatafeedRequest);
assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/datafeeds/" + datafeedId, request.getEndpoint());
assertEquals(Boolean.toString(false), request.getParameters().get("force"));
deleteDatafeedRequest.setForce(true);
request = MLRequestConverters.deleteDatafeed(deleteDatafeedRequest);
assertEquals(Boolean.toString(true), request.getParameters().get("force"));
}
public void testDeleteForecast() throws Exception {
String jobId = randomAlphaOfLength(10);
DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(jobId);
Request request = MLRequestConverters.deleteForecast(deleteForecastRequest);
assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_forecast", request.getEndpoint());
assertFalse(request.getParameters().containsKey("timeout"));
assertFalse(request.getParameters().containsKey("allow_no_forecasts"));
deleteForecastRequest.setForecastIds(randomAlphaOfLength(10), randomAlphaOfLength(10));
deleteForecastRequest.timeout("10s");
deleteForecastRequest.setAllowNoForecasts(true);
request = MLRequestConverters.deleteForecast(deleteForecastRequest);
assertEquals(
"/_xpack/ml/anomaly_detectors/" +
jobId +
"/_forecast/" +
Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds()),
request.getEndpoint());
assertEquals("10s",
request.getParameters().get(DeleteForecastRequest.TIMEOUT.getPreferredName()));
assertEquals(Boolean.toString(true),
request.getParameters().get(DeleteForecastRequest.ALLOW_NO_FORECASTS.getPreferredName()));
}
public void testGetBuckets() throws IOException {
String jobId = randomAlphaOfLength(10);
GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId);
@ -220,6 +282,21 @@ public class MLRequestConvertersTests extends ESTestCase {
}
}
public void testGetCategories() throws IOException {
String jobId = randomAlphaOfLength(10);
GetCategoriesRequest getCategoriesRequest = new GetCategoriesRequest(jobId);
getCategoriesRequest.setPageParams(new PageParams(100, 300));
Request request = MLRequestConverters.getCategories(getCategoriesRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/categories", request.getEndpoint());
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
GetCategoriesRequest parsedRequest = GetCategoriesRequest.PARSER.apply(parser, null);
assertThat(parsedRequest, equalTo(getCategoriesRequest));
}
}
public void testGetOverallBuckets() throws IOException {
String jobId = randomAlphaOfLength(10);
GetOverallBucketsRequest getOverallBucketsRequest = new GetOverallBucketsRequest(jobId);

View File

@ -23,6 +23,8 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetBucketsResponse;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetCategoriesResponse;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetInfluencersResponse;
import org.elasticsearch.client.ml.GetOverallBucketsRequest;
@ -126,11 +128,150 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
bulkRequest.add(indexRequest);
}
private void addCategoryIndexRequest(long categoryId, String categoryName, BulkRequest bulkRequest) {
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC);
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"category_id\": " + categoryId + ", \"terms\": \"" +
categoryName + "\", \"regex\": \".*?" + categoryName + ".*\", \"max_matching_length\": 3, \"examples\": [\"" +
categoryName + "\"]}", XContentType.JSON);
bulkRequest.add(indexRequest);
}
private void addCategoriesIndexRequests(BulkRequest bulkRequest) {
List<String> categories = Arrays.asList("AAL", "JZA", "JBU");
for (int i = 0; i < categories.size(); i++) {
addCategoryIndexRequest(i + 1, categories.get(i), bulkRequest);
}
}
@After
public void deleteJob() throws IOException {
new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
}
public void testGetCategories() throws IOException {
// index some category results
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
addCategoriesIndexRequests(bulkRequest);
highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setPageParams(new PageParams(0, 10000));
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(3L));
assertThat(response.categories().size(), equalTo(3));
assertThat(response.categories().get(0).getCategoryId(), equalTo(1L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("AAL"));
assertThat(response.categories().get(1).getCategoryId(), equalTo(2L));
assertThat(response.categories().get(1).getGrokPattern(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(1).getRegex(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(1).getTerms(), equalTo("JZA"));
assertThat(response.categories().get(2).getCategoryId(), equalTo(3L));
assertThat(response.categories().get(2).getGrokPattern(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(2).getRegex(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(2).getTerms(), equalTo("JBU"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setPageParams(new PageParams(0, 1));
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(3L));
assertThat(response.categories().size(), equalTo(1));
assertThat(response.categories().get(0).getCategoryId(), equalTo(1L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("AAL"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setPageParams(new PageParams(1, 2));
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(3L));
assertThat(response.categories().size(), equalTo(2));
assertThat(response.categories().get(0).getCategoryId(), equalTo(2L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("JZA"));
assertThat(response.categories().get(1).getCategoryId(), equalTo(3L));
assertThat(response.categories().get(1).getGrokPattern(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(1).getRegex(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(1).getTerms(), equalTo("JBU"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setCategoryId(0L); // request a non-existent category
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(0L));
assertThat(response.categories().size(), equalTo(0));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setCategoryId(1L);
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(1L));
assertThat(response.categories().size(), equalTo(1));
assertThat(response.categories().get(0).getCategoryId(), equalTo(1L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("AAL"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setCategoryId(2L);
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(1L));
assertThat(response.categories().get(0).getCategoryId(), equalTo(2L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("JZA"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setCategoryId(3L);
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(1L));
assertThat(response.categories().get(0).getCategoryId(), equalTo(3L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("JBU"));
}
}
public void testGetBuckets() throws IOException {
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();

View File

@ -20,33 +20,40 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.ForecastJobResponse;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PostDataResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
import org.elasticsearch.client.ml.job.config.JobState;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.CloseJobResponse;
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.DeleteJobResponse;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.FlushJobResponse;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.ForecastJobResponse;
import org.elasticsearch.client.ml.GetJobRequest;
import org.elasticsearch.client.ml.GetJobResponse;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.OpenJobResponse;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PostDataResponse;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.PutJobResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.DataDescription;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.FlushJobResponse;
import org.elasticsearch.client.ml.job.config.JobState;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.common.unit.TimeValue;
import org.junit.After;
import java.io.IOException;
@ -122,7 +129,7 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
DeleteJobResponse response = execute(new DeleteJobRequest(jobId),
AcknowledgedResponse response = execute(new DeleteJobRequest(jobId),
machineLearningClient::deleteJob,
machineLearningClient::deleteJobAsync);
@ -288,6 +295,108 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
assertEquals("Updated description", getResponse.jobs().get(0).getDescription());
}
public void testPutDatafeed() throws Exception {
String jobId = randomValidJobId();
Job job = buildJob(jobId);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
execute(new PutJobRequest(job), machineLearningClient::putJob, machineLearningClient::putJobAsync);
String datafeedId = "datafeed-" + jobId;
DatafeedConfig datafeedConfig = DatafeedConfig.builder(datafeedId, jobId).setIndices("some_data_index").build();
PutDatafeedResponse response = execute(new PutDatafeedRequest(datafeedConfig), machineLearningClient::putDatafeed,
machineLearningClient::putDatafeedAsync);
DatafeedConfig createdDatafeed = response.getResponse();
assertThat(createdDatafeed.getId(), equalTo(datafeedId));
assertThat(createdDatafeed.getIndices(), equalTo(datafeedConfig.getIndices()));
}
public void testDeleteDatafeed() throws Exception {
String jobId = randomValidJobId();
Job job = buildJob(jobId);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
String datafeedId = "datafeed-" + jobId;
DatafeedConfig datafeedConfig = DatafeedConfig.builder(datafeedId, jobId).setIndices("some_data_index").build();
execute(new PutDatafeedRequest(datafeedConfig), machineLearningClient::putDatafeed, machineLearningClient::putDatafeedAsync);
AcknowledgedResponse response = execute(new DeleteDatafeedRequest(datafeedId), machineLearningClient::deleteDatafeed,
machineLearningClient::deleteDatafeedAsync);
assertTrue(response.isAcknowledged());
}
public void testDeleteForecast() throws Exception {
String jobId = "test-delete-forecast";
Job job = buildJob(jobId);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT);
Job noForecastsJob = buildJob("test-delete-forecast-none");
machineLearningClient.putJob(new PutJobRequest(noForecastsJob), RequestOptions.DEFAULT);
PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder();
for (int i = 0; i < 30; i++) {
Map<String, Object> hashMap = new HashMap<>();
hashMap.put("total", randomInt(1000));
hashMap.put("timestamp", (i + 1) * 1000);
builder.addDoc(hashMap);
}
PostDataRequest postDataRequest = new PostDataRequest(jobId, builder);
machineLearningClient.postData(postDataRequest, RequestOptions.DEFAULT);
machineLearningClient.flushJob(new FlushJobRequest(jobId), RequestOptions.DEFAULT);
ForecastJobResponse forecastJobResponse1 = machineLearningClient.forecastJob(new ForecastJobRequest(jobId), RequestOptions.DEFAULT);
ForecastJobResponse forecastJobResponse2 = machineLearningClient.forecastJob(new ForecastJobRequest(jobId), RequestOptions.DEFAULT);
waitForForecastToComplete(jobId, forecastJobResponse1.getForecastId());
waitForForecastToComplete(jobId, forecastJobResponse2.getForecastId());
{
DeleteForecastRequest request = new DeleteForecastRequest(jobId);
request.setForecastIds(forecastJobResponse1.getForecastId(), forecastJobResponse2.getForecastId());
AcknowledgedResponse response = execute(request, machineLearningClient::deleteForecast,
machineLearningClient::deleteForecastAsync);
assertTrue(response.isAcknowledged());
assertFalse(forecastExists(jobId, forecastJobResponse1.getForecastId()));
assertFalse(forecastExists(jobId, forecastJobResponse2.getForecastId()));
}
{
DeleteForecastRequest request = DeleteForecastRequest.deleteAllForecasts(noForecastsJob.getId());
request.setAllowNoForecasts(true);
AcknowledgedResponse response = execute(request, machineLearningClient::deleteForecast,
machineLearningClient::deleteForecastAsync);
assertTrue(response.isAcknowledged());
}
{
DeleteForecastRequest request = DeleteForecastRequest.deleteAllForecasts(noForecastsJob.getId());
request.setAllowNoForecasts(false);
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
() -> execute(request, machineLearningClient::deleteForecast, machineLearningClient::deleteForecastAsync));
assertThat(exception.status().getStatus(), equalTo(404));
}
}
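// Polls the forecast's request-stats document until it reports itself as
// finished; fails if that does not happen within 30 seconds.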
private void waitForForecastToComplete(String jobId, String forecastId) throws Exception {
GetRequest request = new GetRequest(".ml-anomalies-" + jobId);
request.id(jobId + "_model_forecast_request_stats_" + forecastId);
assertBusy(() -> {
GetResponse getResponse = highLevelClient().get(request, RequestOptions.DEFAULT);
assertTrue(getResponse.isExists());
assertTrue(getResponse.getSourceAsString().contains("finished"));
}, 30, TimeUnit.SECONDS);
}
private boolean forecastExists(String jobId, String forecastId) throws Exception {
GetRequest getRequest = new GetRequest(".ml-anomalies-" + jobId);
getRequest.id(jobId + "_model_forecast_request_stats_" + forecastId);
GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT);
return getResponse.isExists();
}
public static String randomValidJobId() {
CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray());
return generator.ofCodePointsLength(random(), 10, 10);

View File

@ -21,8 +21,11 @@ package org.elasticsearch.client.documentation;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.MachineLearningGetResultsIT;
import org.elasticsearch.client.MachineLearningIT;
@ -31,14 +34,17 @@ import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.CloseJobResponse;
import org.elasticsearch.client.ml.DeleteDatafeedRequest;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.DeleteJobResponse;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.FlushJobResponse;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.ForecastJobResponse;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetBucketsResponse;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetCategoriesResponse;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetInfluencersResponse;
import org.elasticsearch.client.ml.GetJobRequest;
@ -53,28 +59,36 @@ import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.OpenJobResponse;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PostDataResponse;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.PutJobResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.datafeed.ChunkingConfig;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.AnalysisLimits;
import org.elasticsearch.client.ml.job.config.DataDescription;
import org.elasticsearch.client.ml.job.config.DetectionRule;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.process.DataCounts;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.config.ModelPlotConfig;
import org.elasticsearch.client.ml.job.config.Operator;
import org.elasticsearch.client.ml.job.config.RuleCondition;
import org.elasticsearch.client.ml.job.process.DataCounts;
import org.elasticsearch.client.ml.job.results.AnomalyRecord;
import org.elasticsearch.client.ml.job.results.Bucket;
import org.elasticsearch.client.ml.job.results.CategoryDefinition;
import org.elasticsearch.client.ml.job.results.Influencer;
import org.elasticsearch.client.ml.job.results.OverallBucket;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.junit.After;
import java.io.IOException;
@ -90,6 +104,7 @@ import java.util.stream.Collectors;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.core.Is.is;
@ -182,8 +197,6 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
public void testGetJob() throws Exception {
RestHighLevelClient client = highLevelClient();
String jobId = "get-machine-learning-job1";
Job job = MachineLearningIT.buildJob("get-machine-learning-job1");
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
@ -251,7 +264,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
//tag::x-pack-delete-ml-job-request
DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-first-machine-learning-job");
deleteJobRequest.setForce(false); //<1>
DeleteJobResponse deleteJobResponse = client.machineLearning().deleteJob(deleteJobRequest, RequestOptions.DEFAULT);
AcknowledgedResponse deleteJobResponse = client.machineLearning().deleteJob(deleteJobRequest, RequestOptions.DEFAULT);
//end::x-pack-delete-ml-job-request
//tag::x-pack-delete-ml-job-response
@ -260,9 +273,9 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
}
{
//tag::x-pack-delete-ml-job-request-listener
ActionListener<DeleteJobResponse> listener = new ActionListener<DeleteJobResponse>() {
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(DeleteJobResponse deleteJobResponse) {
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
// <1>
}
@ -473,7 +486,162 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testPutDatafeed() throws Exception {
RestHighLevelClient client = highLevelClient();
{
// We need to create a job for the datafeed request to be valid
String jobId = "put-datafeed-job-1";
Job job = MachineLearningIT.buildJob(jobId);
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
String id = "datafeed-1";
//tag::x-pack-ml-create-datafeed-config
DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder(id, jobId) // <1>
.setIndices("index_1", "index_2"); // <2>
//end::x-pack-ml-create-datafeed-config
AggregatorFactories.Builder aggs = AggregatorFactories.builder();
//tag::x-pack-ml-create-datafeed-config-set-aggregations
datafeedBuilder.setAggregations(aggs); // <1>
//end::x-pack-ml-create-datafeed-config-set-aggregations
// Clearing aggregation to avoid complex validation rules
datafeedBuilder.setAggregations((String) null);
//tag::x-pack-ml-create-datafeed-config-set-chunking-config
datafeedBuilder.setChunkingConfig(ChunkingConfig.newAuto()); // <1>
//end::x-pack-ml-create-datafeed-config-set-chunking-config
//tag::x-pack-ml-create-datafeed-config-set-frequency
datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(30)); // <1>
//end::x-pack-ml-create-datafeed-config-set-frequency
//tag::x-pack-ml-create-datafeed-config-set-query
datafeedBuilder.setQuery(QueryBuilders.matchAllQuery()); // <1>
//end::x-pack-ml-create-datafeed-config-set-query
//tag::x-pack-ml-create-datafeed-config-set-query-delay
datafeedBuilder.setQueryDelay(TimeValue.timeValueMinutes(1)); // <1>
//end::x-pack-ml-create-datafeed-config-set-query-delay
List<SearchSourceBuilder.ScriptField> scriptFields = Collections.emptyList();
//tag::x-pack-ml-create-datafeed-config-set-script-fields
datafeedBuilder.setScriptFields(scriptFields); // <1>
//end::x-pack-ml-create-datafeed-config-set-script-fields
//tag::x-pack-ml-create-datafeed-config-set-scroll-size
datafeedBuilder.setScrollSize(1000); // <1>
//end::x-pack-ml-create-datafeed-config-set-scroll-size
//tag::x-pack-ml-put-datafeed-request
PutDatafeedRequest request = new PutDatafeedRequest(datafeedBuilder.build()); // <1>
//end::x-pack-ml-put-datafeed-request
//tag::x-pack-ml-put-datafeed-execute
PutDatafeedResponse response = client.machineLearning().putDatafeed(request, RequestOptions.DEFAULT);
//end::x-pack-ml-put-datafeed-execute
//tag::x-pack-ml-put-datafeed-response
DatafeedConfig datafeed = response.getResponse(); // <1>
//end::x-pack-ml-put-datafeed-response
assertThat(datafeed.getId(), equalTo("datafeed-1"));
}
{
// We need to create a job for the datafeed request to be valid
String jobId = "put-datafeed-job-2";
Job job = MachineLearningIT.buildJob(jobId);
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
String id = "datafeed-2";
DatafeedConfig datafeed = new DatafeedConfig.Builder(id, jobId).setIndices("index_1", "index_2").build();
PutDatafeedRequest request = new PutDatafeedRequest(datafeed);
// tag::x-pack-ml-put-datafeed-execute-listener
ActionListener<PutDatafeedResponse> listener = new ActionListener<PutDatafeedResponse>() {
@Override
public void onResponse(PutDatafeedResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-ml-put-datafeed-execute-listener
// Replace the empty listener with a blocking listener for the test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-put-datafeed-execute-async
client.machineLearning().putDatafeedAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-ml-put-datafeed-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testDeleteDatafeed() throws Exception {
RestHighLevelClient client = highLevelClient();
String jobId = "test-delete-datafeed-job";
Job job = MachineLearningIT.buildJob(jobId);
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
String datafeedId = "test-delete-datafeed";
DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId, jobId).setIndices("foo").build();
client.machineLearning().putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);
{
//tag::x-pack-delete-ml-datafeed-request
DeleteDatafeedRequest deleteDatafeedRequest = new DeleteDatafeedRequest(datafeedId);
deleteDatafeedRequest.setForce(false); //<1>
AcknowledgedResponse deleteDatafeedResponse = client.machineLearning().deleteDatafeed(
deleteDatafeedRequest, RequestOptions.DEFAULT);
//end::x-pack-delete-ml-datafeed-request
//tag::x-pack-delete-ml-datafeed-response
boolean isAcknowledged = deleteDatafeedResponse.isAcknowledged(); //<1>
//end::x-pack-delete-ml-datafeed-response
}
// Recreate datafeed to allow second deletion
client.machineLearning().putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);
{
//tag::x-pack-delete-ml-datafeed-request-listener
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
//end::x-pack-delete-ml-datafeed-request-listener
// Replace the empty listener with a blocking listener for the test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
//tag::x-pack-delete-ml-datafeed-request-async
DeleteDatafeedRequest deleteDatafeedRequest = new DeleteDatafeedRequest(datafeedId);
client.machineLearning().deleteDatafeedAsync(deleteDatafeedRequest, RequestOptions.DEFAULT, listener); // <1>
//end::x-pack-delete-ml-datafeed-request-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testGetBuckets() throws IOException, InterruptedException {
RestHighLevelClient client = highLevelClient();
@ -636,8 +804,85 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testDeleteForecast() throws Exception {
RestHighLevelClient client = highLevelClient();
Job job = MachineLearningIT.buildJob("deleting-forecast-for-job");
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);
PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder();
for (int i = 0; i < 30; i++) {
Map<String, Object> hashMap = new HashMap<>();
hashMap.put("total", randomInt(1000));
hashMap.put("timestamp", (i + 1) * 1000);
builder.addDoc(hashMap);
}
PostDataRequest postDataRequest = new PostDataRequest(job.getId(), builder);
client.machineLearning().postData(postDataRequest, RequestOptions.DEFAULT);
client.machineLearning().flushJob(new FlushJobRequest(job.getId()), RequestOptions.DEFAULT);
ForecastJobResponse forecastJobResponse = client.machineLearning()
.forecastJob(new ForecastJobRequest(job.getId()), RequestOptions.DEFAULT);
String forecastId = forecastJobResponse.getForecastId();
GetRequest request = new GetRequest(".ml-anomalies-" + job.getId());
request.id(job.getId() + "_model_forecast_request_stats_" + forecastId);
assertBusy(() -> {
GetResponse getResponse = highLevelClient().get(request, RequestOptions.DEFAULT);
assertTrue(getResponse.isExists());
assertTrue(getResponse.getSourceAsString().contains("finished"));
}, 30, TimeUnit.SECONDS);
{
//tag::x-pack-ml-delete-forecast-request
DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest("deleting-forecast-for-job"); //<1>
//end::x-pack-ml-delete-forecast-request
//tag::x-pack-ml-delete-forecast-request-options
deleteForecastRequest.setForecastIds(forecastId); //<1>
deleteForecastRequest.timeout("30s"); //<2>
deleteForecastRequest.setAllowNoForecasts(true); //<3>
//end::x-pack-ml-delete-forecast-request-options
//tag::x-pack-ml-delete-forecast-execute
AcknowledgedResponse deleteForecastResponse = client.machineLearning().deleteForecast(deleteForecastRequest,
RequestOptions.DEFAULT);
//end::x-pack-ml-delete-forecast-execute
//tag::x-pack-ml-delete-forecast-response
boolean isAcknowledged = deleteForecastResponse.isAcknowledged(); //<1>
//end::x-pack-ml-delete-forecast-response
}
{
//tag::x-pack-ml-delete-forecast-listener
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse deleteForecastResponse) {
//<1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
//end::x-pack-ml-delete-forecast-listener
DeleteForecastRequest deleteForecastRequest = DeleteForecastRequest.deleteAllForecasts(job.getId());
deleteForecastRequest.setAllowNoForecasts(true);
// Replace the empty listener with a blocking listener for the test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-delete-forecast-execute-async
client.machineLearning().deleteForecastAsync(deleteForecastRequest, RequestOptions.DEFAULT, listener); //<1>
// end::x-pack-ml-delete-forecast-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testGetJobStats() throws Exception {
RestHighLevelClient client = highLevelClient();
@ -1111,4 +1356,74 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testGetCategories() throws IOException, InterruptedException {
RestHighLevelClient client = highLevelClient();
String jobId = "test-get-categories";
Job job = MachineLearningIT.buildJob(jobId);
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
// Index a category document directly so the request below has a result to return
IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc");
indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
indexRequest.source("{\"job_id\": \"test-get-categories\", \"category_id\": 1, \"terms\": \"AAL\"," +
" \"regex\": \".*?AAL.*\", \"max_matching_length\": 3, \"examples\": [\"AAL\"]}", XContentType.JSON);
client.index(indexRequest, RequestOptions.DEFAULT);
{
// tag::x-pack-ml-get-categories-request
GetCategoriesRequest request = new GetCategoriesRequest(jobId); // <1>
// end::x-pack-ml-get-categories-request
// tag::x-pack-ml-get-categories-category-id
request.setCategoryId(1L); // <1>
// end::x-pack-ml-get-categories-category-id
// tag::x-pack-ml-get-categories-page
request.setPageParams(new PageParams(100, 200)); // <1>
// end::x-pack-ml-get-categories-page
// Set page params back to null so the response contains the category we indexed
request.setPageParams(null);
// tag::x-pack-ml-get-categories-execute
GetCategoriesResponse response = client.machineLearning().getCategories(request, RequestOptions.DEFAULT);
// end::x-pack-ml-get-categories-execute
// tag::x-pack-ml-get-categories-response
long count = response.count(); // <1>
List<CategoryDefinition> categories = response.categories(); // <2>
// end::x-pack-ml-get-categories-response
assertEquals(1, categories.size());
}
{
GetCategoriesRequest request = new GetCategoriesRequest(jobId);
// tag::x-pack-ml-get-categories-listener
ActionListener<GetCategoriesResponse> listener =
new ActionListener<GetCategoriesResponse>() {
@Override
public void onResponse(GetCategoriesResponse getCategoriesResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-ml-get-categories-listener
// Replace the empty listener with a blocking listener for the test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-get-categories-execute-async
client.machineLearning().getCategoriesAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-ml-get-categories-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
}

View File

@ -0,0 +1,42 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
import org.elasticsearch.test.ESTestCase;
public class DeleteDatafeedRequestTests extends ESTestCase {
public void testConstructor_GivenNullId() {
NullPointerException ex = expectThrows(NullPointerException.class, () -> new DeleteDatafeedRequest(null));
assertEquals("[datafeed_id] must not be null", ex.getMessage());
}
public void testSetForce() {
DeleteDatafeedRequest deleteDatafeedRequest = createTestInstance();
assertFalse(deleteDatafeedRequest.isForce());
deleteDatafeedRequest.setForce(true);
assertTrue(deleteDatafeedRequest.isForce());
}
private DeleteDatafeedRequest createTestInstance() {
return new DeleteDatafeedRequest(DatafeedConfigTests.randomValidDatafeedId());
}
}

View File

@ -0,0 +1,62 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.config.JobTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class DeleteForecastRequestTests extends AbstractXContentTestCase<DeleteForecastRequest> {
@Override
protected DeleteForecastRequest createTestInstance() {
DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(JobTests.randomValidJobId());
if (randomBoolean()) {
int length = randomInt(10);
List<String> ids = new ArrayList<>(length);
for (int i = 0; i < length; i++) {
ids.add(randomAlphaOfLength(10));
}
deleteForecastRequest.setForecastIds(ids);
}
if (randomBoolean()) {
deleteForecastRequest.setAllowNoForecasts(randomBoolean());
}
if (randomBoolean()) {
deleteForecastRequest.timeout(randomTimeValue());
}
return deleteForecastRequest;
}
@Override
protected DeleteForecastRequest doParseInstance(XContentParser parser) throws IOException {
return DeleteForecastRequest.PARSER.apply(parser, null);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
}

View File

@ -0,0 +1,51 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
public class GetCategoriesRequestTests extends AbstractXContentTestCase<GetCategoriesRequest> {
@Override
protected GetCategoriesRequest createTestInstance() {
GetCategoriesRequest request = new GetCategoriesRequest(randomAlphaOfLengthBetween(1, 20));
if (randomBoolean()) {
request.setCategoryId(randomNonNegativeLong());
} else {
int from = randomInt(10000);
int size = randomInt(10000);
request.setPageParams(new PageParams(from, size));
}
return request;
}
@Override
protected GetCategoriesRequest doParseInstance(XContentParser parser) throws IOException {
return GetCategoriesRequest.PARSER.apply(parser, null);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
}

View File

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.results.CategoryDefinition;
import org.elasticsearch.client.ml.job.results.CategoryDefinitionTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class GetCategoriesResponseTests extends AbstractXContentTestCase<GetCategoriesResponse> {
@Override
protected GetCategoriesResponse createTestInstance() {
String jobId = randomAlphaOfLength(20);
int listSize = randomInt(10);
List<CategoryDefinition> categories = new ArrayList<>(listSize);
for (int j = 0; j < listSize; j++) {
CategoryDefinition category = CategoryDefinitionTests.createTestInstance(jobId);
categories.add(category);
}
return new GetCategoriesResponse(categories, listSize);
}
@Override
protected GetCategoriesResponse doParseInstance(XContentParser parser) throws IOException {
return GetCategoriesResponse.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
}

View File

@ -18,21 +18,22 @@
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
public class DeleteJobResponseTests extends AbstractXContentTestCase<DeleteJobResponse> {
public class PutDatafeedRequestTests extends AbstractXContentTestCase<PutDatafeedRequest> {
@Override
protected DeleteJobResponse createTestInstance() {
return new DeleteJobResponse();
protected PutDatafeedRequest createTestInstance() {
return new PutDatafeedRequest(DatafeedConfigTests.createRandom());
}
@Override
protected DeleteJobResponse doParseInstance(XContentParser parser) throws IOException {
return DeleteJobResponse.fromXContent(parser);
protected PutDatafeedRequest doParseInstance(XContentParser parser) {
return new PutDatafeedRequest(DatafeedConfig.PARSER.apply(parser, null).build());
}
@Override

View File

@ -0,0 +1,49 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.function.Predicate;
public class PutDatafeedResponseTests extends AbstractXContentTestCase<PutDatafeedResponse> {
@Override
protected PutDatafeedResponse createTestInstance() {
return new PutDatafeedResponse(DatafeedConfigTests.createRandom());
}
@Override
protected PutDatafeedResponse doParseInstance(XContentParser parser) throws IOException {
return PutDatafeedResponse.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> !field.isEmpty();
}
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.client.ml.datafeed;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -27,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
@ -36,19 +34,26 @@ import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig> {
@Override
protected DatafeedConfig createTestInstance() {
return createRandom();
}
public static DatafeedConfig createRandom() {
long bucketSpanMillis = 3600000;
DatafeedConfig.Builder builder = constructBuilder();
builder.setIndices(randomStringList(1, 10));
builder.setTypes(randomStringList(0, 10));
if (randomBoolean()) {
builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
try {
builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
} catch (IOException e) {
throw new RuntimeException("Failed to serialize query", e);
}
}
boolean addScriptFields = randomBoolean();
if (addScriptFields) {
@ -72,7 +77,11 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
aggs.addAggregator(AggregationBuilders.dateHistogram("buckets")
.interval(aggHistogramInterval).subAggregation(maxTime).field("time"));
builder.setAggregations(aggs);
try {
builder.setAggregations(aggs);
} catch (IOException e) {
throw new RuntimeException("failed to serialize aggs", e);
}
}
if (randomBoolean()) {
builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
@ -93,12 +102,6 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
return builder.build();
}
@Override
protected NamedXContentRegistry xContentRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
return new NamedXContentRegistry(searchModule.getNamedXContents());
}
public static List<String> randomStringList(int min, int max) {
int size = scaledRandomIntBetween(min, max);
List<String> list = new ArrayList<>();
@ -150,21 +153,6 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(randomValidDatafeedId(), null));
}
public void testCheckValid_GivenNullIndices() {
DatafeedConfig.Builder conf = constructBuilder();
expectThrows(NullPointerException.class, () -> conf.setIndices(null));
}
public void testCheckValid_GivenNullType() {
DatafeedConfig.Builder conf = constructBuilder();
expectThrows(NullPointerException.class, () -> conf.setTypes(null));
}
public void testCheckValid_GivenNullQuery() {
DatafeedConfig.Builder conf = constructBuilder();
expectThrows(NullPointerException.class, () -> conf.setQuery(null));
}
public static String randomValidDatafeedId() {
CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray());
return generator.ofCodePointsLength(random(), 10, 10);

View File

@ -18,19 +18,16 @@
*/
package org.elasticsearch.client.ml.datafeed;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate> {
@ -54,7 +51,11 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate
builder.setTypes(DatafeedConfigTests.randomStringList(1, 10));
}
if (randomBoolean()) {
builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
try {
builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
} catch (IOException e) {
throw new RuntimeException("Failed to serialize query", e);
}
}
if (randomBoolean()) {
int scriptsSize = randomInt(3);
@ -71,7 +72,11 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate
// Testing with a single agg is ok as we don't have special list xcontent logic
AggregatorFactories.Builder aggs = new AggregatorFactories.Builder();
aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10)));
builder.setAggregations(aggs);
try {
builder.setAggregations(aggs);
} catch (IOException e) {
throw new RuntimeException("Failed to serialize aggs", e);
}
}
if (randomBoolean()) {
builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
@ -91,11 +96,4 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate
protected boolean supportsUnknownFields() {
return false;
}
@Override
protected NamedXContentRegistry xContentRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
return new NamedXContentRegistry(searchModule.getNamedXContents());
}
}

View File

@ -25,7 +25,7 @@ import java.util.Arrays;
public class CategoryDefinitionTests extends AbstractXContentTestCase<CategoryDefinition> {
public CategoryDefinition createTestInstance(String jobId) {
public static CategoryDefinition createTestInstance(String jobId) {
CategoryDefinition categoryDefinition = new CategoryDefinition(jobId);
categoryDefinition.setCategoryId(randomLong());
categoryDefinition.setTerms(randomAlphaOfLength(10));

View File

@ -57,6 +57,8 @@ integTestCluster {
// TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults
systemProperty 'es.scripting.use_java_time', 'false'
systemProperty 'es.scripting.update.ctx_in_params', 'false'
//TODO: remove this once the cname is prepended to the address by default in 7.0
systemProperty 'es.http.cname_in_publish_address', 'true'
}
// remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed

View File

@ -0,0 +1,49 @@
[[java-rest-high-x-pack-ml-delete-datafeed]]
=== Delete Datafeed API
[[java-rest-high-x-pack-machine-learning-delete-datafeed-request]]
==== Delete Datafeed Request
A `DeleteDatafeedRequest` object requires a non-null `datafeedId` and can optionally set `force`.
It can be executed as follows:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-datafeed-request]
---------------------------------------------------
<1> Use to forcefully delete a started datafeed;
this is quicker than stopping and then deleting the datafeed.
Defaults to `false`.
[[java-rest-high-x-pack-machine-learning-delete-datafeed-response]]
==== Delete Datafeed Response
The returned `AcknowledgedResponse` object indicates whether the request was acknowledged:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-datafeed-response]
---------------------------------------------------
<1> `isAcknowledged` indicates whether the deletion request was acknowledged
[[java-rest-high-x-pack-machine-learning-delete-datafeed-async]]
==== Delete Datafeed Asynchronously
This request can also be made asynchronously.
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-datafeed-request-async]
---------------------------------------------------
<1> The `DeleteDatafeedRequest` to execute and the `ActionListener` to alert on completion or error.
The deletion request returns immediately. Once the request has completed, the `ActionListener` is
called back using the `onResponse` or `onFailure` method. The latter indicates that a failure
occurred while making the request.
A typical listener for a `DeleteDatafeedRequest` could be defined as follows:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-datafeed-request-listener]
---------------------------------------------------
<1> The action to be taken when it is completed
<2> What to do when a failure occurs
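A minimal sketch of such a listener, with illustrative handling in the callbacks:

["source","java"]
---------------------------------------------------
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
    @Override
    public void onResponse(AcknowledgedResponse response) {
        // deletion completed; response.isAcknowledged() reports the outcome
    }

    @Override
    public void onFailure(Exception e) {
        // the request failed; inspect or log the exception
    }
};
---------------------------------------------------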

View File

@ -0,0 +1,78 @@
[[java-rest-high-x-pack-ml-delete-forecast]]
=== Delete Forecast API
The Delete Forecast API provides the ability to delete a {ml} job's
forecast in the cluster.
It accepts a `DeleteForecastRequest` object and responds
with an `AcknowledgedResponse` object.
[[java-rest-high-x-pack-ml-delete-forecast-request]]
==== Delete Forecast Request
A `DeleteForecastRequest` object gets created with an existing non-null `jobId`.
All other fields are optional for the request.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-request]
--------------------------------------------------
<1> Constructing a new request referencing an existing `jobId`
==== Optional Arguments
The following arguments are optional:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-request-options]
--------------------------------------------------
<1> Sets the specific `forecastIds` to delete; can be set to `_all` to indicate all forecasts for the given
`jobId`
<2> Sets the timeout for the request; the default is 30 seconds
<3> Sets the `allow_no_forecasts` option. When `true`, no error is returned if an `_all`
request finds no forecasts. Defaults to `true`
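Put together, a fully configured request might look like the following sketch (the setter
names here are assumptions inferred from the option names above):

["source","java"]
--------------------------------------------------
DeleteForecastRequest request = new DeleteForecastRequest("my-job"); // hypothetical job ID
request.setForecastIds("forecast-1", "forecast-2"); // or "_all" for every forecast of the job
request.setTimeout(TimeValue.timeValueSeconds(60)); // assumed setter; overrides the 30s default
request.setAllowNoForecasts(true); // assumed setter; no error when "_all" matches nothing
--------------------------------------------------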
[[java-rest-high-x-pack-ml-delete-forecast-execution]]
==== Execution
The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-execute]
--------------------------------------------------
[[java-rest-high-x-pack-ml-delete-forecast-execution-async]]
==== Asynchronous Execution
The request can also be executed asynchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-execute-async]
--------------------------------------------------
<1> The `DeleteForecastRequest` to execute and the `ActionListener` to use when
the execution completes
The method does not block and returns immediately. The passed `ActionListener` is used
to notify the caller of completion. A typical `ActionListener` for `AcknowledgedResponse` may
look like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs
[[java-rest-high-x-pack-ml-delete-forecast-response]]
==== Delete Forecast Response
An `AcknowledgedResponse` contains an acknowledgement of the forecast(s) deletion:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-response]
--------------------------------------------------
<1> `isAcknowledged()` indicates whether the forecast(s) were successfully deleted.

View File

@ -18,7 +18,7 @@ Defaults to `false`
[[java-rest-high-x-pack-machine-learning-delete-job-response]]
==== Delete Job Response
The returned `DeleteJobResponse` object indicates the acknowledgement of the request:
The returned `AcknowledgedResponse` object indicates the acknowledgement of the request:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-delete-ml-job-response]

View File

@ -0,0 +1,83 @@
[[java-rest-high-x-pack-ml-get-categories]]
=== Get Categories API
The Get Categories API retrieves one or more category results.
It accepts a `GetCategoriesRequest` object and responds
with a `GetCategoriesResponse` object.
[[java-rest-high-x-pack-ml-get-categories-request]]
==== Get Categories Request
A `GetCategoriesRequest` object gets created with an existing non-null `jobId`.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-request]
--------------------------------------------------
<1> Constructing a new request referencing an existing `jobId`
==== Optional Arguments
The following arguments are optional:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-category-id]
--------------------------------------------------
<1> The ID of the category to get. If not set, all categories are returned.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-page]
--------------------------------------------------
<1> The page parameters `from` and `size`. `from` specifies the number of categories to skip.
`size` specifies the maximum number of categories to get. Defaults to `0` and `100` respectively.
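For instance, a request for the second page of categories could be sketched as follows
(`PageParams` and the setter names are assumptions mirroring the parameters above):

["source","java"]
--------------------------------------------------
GetCategoriesRequest request = new GetCategoriesRequest("my-job"); // hypothetical job ID
request.setPageParams(new PageParams(100, 100)); // skip the first 100 categories, return at most 100
--------------------------------------------------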
[[java-rest-high-x-pack-ml-get-categories-execution]]
==== Execution
The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-execute]
--------------------------------------------------
[[java-rest-high-x-pack-ml-get-categories-execution-async]]
==== Asynchronous Execution
The request can also be executed asynchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-execute-async]
--------------------------------------------------
<1> The `GetCategoriesRequest` to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back with the `onResponse` method
if the execution is successful or the `onFailure` method if the execution
failed.
A typical listener for `GetCategoriesResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs
[[java-rest-high-snapshot-ml-get-categories-response]]
==== Get Categories Response
The returned `GetCategoriesResponse` contains the requested categories:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-response]
--------------------------------------------------
<1> The count of categories that were matched
<2> The categories retrieved

View File

@ -0,0 +1,124 @@
[[java-rest-high-x-pack-ml-put-datafeed]]
=== Put Datafeed API
The Put Datafeed API can be used to create a new {ml} datafeed
in the cluster. The API accepts a `PutDatafeedRequest` object
as a request and returns a `PutDatafeedResponse`.
[[java-rest-high-x-pack-ml-put-datafeed-request]]
==== Put Datafeed Request
A `PutDatafeedRequest` requires the following argument:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-request]
--------------------------------------------------
<1> The configuration of the {ml} datafeed to create
[[java-rest-high-x-pack-ml-put-datafeed-config]]
==== Datafeed Configuration
The `DatafeedConfig` object contains all the details about the {ml} datafeed
configuration.
A `DatafeedConfig` requires the following arguments:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config]
--------------------------------------------------
<1> The datafeed ID and the job ID
<2> The indices that contain the data to retrieve and feed into the job
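As a sketch, such a configuration could be built as follows (the builder entry point and
setter names are assumptions based on the arguments described):

["source","java"]
--------------------------------------------------
DatafeedConfig datafeedConfig = DatafeedConfig.builder("my-datafeed", "my-job") // hypothetical IDs
    .setIndices("my-index") // the index holding the data to feed into the job
    .build();
PutDatafeedRequest request = new PutDatafeedRequest(datafeedConfig);
--------------------------------------------------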
==== Optional Arguments
The following arguments are optional:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-chunking-config]
--------------------------------------------------
<1> Specifies how data searches are split into time chunks.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-frequency]
--------------------------------------------------
<1> The interval at which scheduled queries are made while the datafeed runs in real time.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-query]
--------------------------------------------------
<1> A query to filter the search results by. Defaults to the `match_all` query.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-query-delay]
--------------------------------------------------
<1> The time interval behind real time that data is queried.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-script-fields]
--------------------------------------------------
<1> Allows the use of script fields.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-scroll-size]
--------------------------------------------------
<1> The `size` parameter used in the searches.
[[java-rest-high-x-pack-ml-put-datafeed-execution]]
==== Execution
The Put Datafeed API can be executed through a `MachineLearningClient`
instance. Such an instance can be retrieved from a `RestHighLevelClient`
using the `machineLearning()` method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute]
--------------------------------------------------
[[java-rest-high-x-pack-ml-put-datafeed-response]]
==== Response
The returned `PutDatafeedResponse` contains the full representation of
the new {ml} datafeed if it has been successfully created. This includes
the creation time and other fields initialized using
default values:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-response]
--------------------------------------------------
<1> The created datafeed
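As a sketch, the created configuration can be read back from the response (the `getResponse`
accessor name is an assumption):

["source","java"]
--------------------------------------------------
PutDatafeedResponse response = client.machineLearning().putDatafeed(request, RequestOptions.DEFAULT);
DatafeedConfig createdDatafeed = response.getResponse(); // assumed accessor; the datafeed as stored
--------------------------------------------------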
[[java-rest-high-x-pack-ml-put-datafeed-async]]
==== Asynchronous Execution
This request can be executed asynchronously:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute-async]
--------------------------------------------------
<1> The `PutDatafeedRequest` to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.
A typical listener for `PutDatafeedResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument

View File

@ -142,7 +142,7 @@ This request can be executed asynchronously:
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-job-execute-async]
--------------------------------------------------
<1> The `PutMlJobRequest` to execute and the `ActionListener` to use when
<1> The `PutJobRequest` to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once it is

View File

@ -220,12 +220,16 @@ The Java High Level REST Client supports the following Machine Learning APIs:
* <<java-rest-high-x-pack-ml-flush-job>>
* <<java-rest-high-x-pack-ml-update-job>>
* <<java-rest-high-x-pack-ml-get-job-stats>>
* <<java-rest-high-x-pack-ml-put-datafeed>>
* <<java-rest-high-x-pack-ml-delete-datafeed>>
* <<java-rest-high-x-pack-ml-forecast-job>>
* <<java-rest-high-x-pack-ml-delete-forecast>>
* <<java-rest-high-x-pack-ml-get-buckets>>
* <<java-rest-high-x-pack-ml-get-overall-buckets>>
* <<java-rest-high-x-pack-ml-get-records>>
* <<java-rest-high-x-pack-ml-post-data>>
* <<java-rest-high-x-pack-ml-get-influencers>>
* <<java-rest-high-x-pack-ml-get-categories>>
include::ml/put-job.asciidoc[]
include::ml/get-job.asciidoc[]
@ -234,13 +238,17 @@ include::ml/open-job.asciidoc[]
include::ml/close-job.asciidoc[]
include::ml/update-job.asciidoc[]
include::ml/flush-job.asciidoc[]
include::ml/put-datafeed.asciidoc[]
include::ml/delete-datafeed.asciidoc[]
include::ml/get-job-stats.asciidoc[]
include::ml/forecast-job.asciidoc[]
include::ml/delete-forecast.asciidoc[]
include::ml/get-buckets.asciidoc[]
include::ml/get-overall-buckets.asciidoc[]
include::ml/get-records.asciidoc[]
include::ml/post-data.asciidoc[]
include::ml/get-influencers.asciidoc[]
include::ml/get-categories.asciidoc[]
== Migration APIs

View File

@ -348,7 +348,7 @@ GET /_search
\... will sort the composite bucket in descending order when comparing values from the `date_histogram` source
and in ascending order when comparing values from the `terms` source.
====== Missing bucket
==== Missing bucket
By default documents without a value for a given source are ignored.
It is possible to include them in the response by setting `missing_bucket` to

View File

@ -37,6 +37,8 @@ include::tokenfilters/multiplexer-tokenfilter.asciidoc[]
include::tokenfilters/condition-tokenfilter.asciidoc[]
include::tokenfilters/predicate-tokenfilter.asciidoc[]
include::tokenfilters/stemmer-tokenfilter.asciidoc[]
include::tokenfilters/stemmer-override-tokenfilter.asciidoc[]

View File

@ -0,0 +1,79 @@
[[analysis-predicatefilter-tokenfilter]]
=== Predicate Token Filter Script
The `predicate_token_filter` token filter takes a predicate script and removes tokens that do
not match the predicate.
[float]
=== Options
[horizontal]
script:: A predicate script that determines whether or not the current token will
be emitted. Note that only inline scripts are supported.
[float]
=== Settings example
You can set it up like:
[source,js]
--------------------------------------------------
PUT /condition_example
{
"settings" : {
"analysis" : {
"analyzer" : {
"my_analyzer" : {
"tokenizer" : "standard",
"filter" : [ "my_script_filter" ]
}
},
"filter" : {
"my_script_filter" : {
"type" : "predicate_token_filter",
"script" : {
"source" : "token.getTerm().length() > 5" <1>
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
<1> This will emit tokens that are more than 5 characters long
And test it like:
[source,js]
--------------------------------------------------
POST /condition_example/_analyze
{
"analyzer" : "my_analyzer",
"text" : "What Flapdoodle"
}
--------------------------------------------------
// CONSOLE
// TEST[continued]
And it'd respond:
[source,js]
--------------------------------------------------
{
"tokens": [
{
"token": "Flapdoodle", <1>
"start_offset": 5,
"end_offset": 15,
"type": "<ALPHANUM>",
"position": 1 <2>
}
]
}
--------------------------------------------------
// TESTRESPONSE
<1> The token 'What' has been removed from the tokenstream because it does not
match the predicate.
<2> The position and offset values are unaffected by the removal of earlier tokens

Binary files not shown: 22 image files added (the screenshots referenced by the SQL client application docs in this commit), ranging from 13 KiB to 97 KiB.
View File

@ -87,3 +87,9 @@ depending on whether {security} is enabled. Previously a
404 - NOT FOUND (IndexNotFoundException) could be returned in case the
current user was not authorized for any alias. An empty response with
status 200 - OK is now returned instead at all times.
==== Put User API response no longer has `user` object
The Put User API response was changed in 6.5.0 to add the `created` field
outside of the user object, where it had previously been. In 7.0.0 the user
object has been removed in favor of the top-level `created` field.
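For example, where a 6.x response nested `created` inside a `user` object, a 7.0.0 response
reduces to a sketch like:

[source,js]
--------------------------------------------------
{
  "created": true
}
--------------------------------------------------
// NOTCONSOLE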

View File

@ -40,3 +40,16 @@ will be removed in the future, thus requiring HTTP to always be enabled.
This setting has been removed, as disabling http pipelining support on the server
provided little value. The setting `http.pipelining.max_events` can still be used to
limit the number of pipelined requests in-flight.
==== Cross-cluster search settings renamed
The cross-cluster search remote cluster connection infrastructure is also used
in cross-cluster replication. This means that the setting names
`search.remote.*` used for configuring cross-cluster search belie the fact that
they also apply to other situations where a connection to a remote cluster is
used. Therefore, these settings have been renamed from `search.remote.*` to
`cluster.remote.*`. For backwards compatibility purposes, we will fall back to
`search.remote.*` if `cluster.remote.*` is not set. For any such settings stored
in the cluster state, or set on dynamic settings updates, we will automatically
upgrade the setting from `search.remote.*` to `cluster.remote.*`. The fallback
settings will be removed in 8.0.0.
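For example, a remote cluster that would previously have been registered through
`search.remote.*` is now configured as follows (the cluster alias and seed address are
illustrative):

[source,js]
--------------------------------------------------
PUT _cluster/settings
{
  "persistent": {
    "cluster.remote.my_remote_cluster.seeds": ["127.0.0.1:9300"]
  }
}
--------------------------------------------------
// NOTCONSOLE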

View File

@ -207,6 +207,51 @@ repositories.url.allowed_urls: ["http://www.example.org/root/*", "https://*.mydo
URL repositories with `file:` URLs can only point to locations registered in the `path.repo` setting similar to
shared file system repository.
[float]
[role="xpack"]
[testenv="basic"]
===== Source Only Repository
A source repository enables you to create minimal, source-only snapshots that take up to 50% less space on disk.
Source-only snapshots contain stored fields and index metadata. They do not include index or doc values structures
and are not searchable when restored. After restoring a source-only snapshot, you must <<docs-reindex,reindex>>
the data into a new index.
Source repositories delegate to another snapshot repository for storage.
[IMPORTANT]
==================================================
Source-only snapshots are only supported if the `_source` field is enabled and no source-filtering is applied.
When you restore a source-only snapshot:
* The restored index is read-only and can only serve `match_all` search or scroll requests to enable reindexing.
* Queries other than `match_all` and `_get` requests are not supported.
* The mapping of the restored index is empty, but the original mapping is available from the type's
top-level `meta` element.
==================================================
When you create a source repository, you must specify the type and name of the delegate repository
where the snapshots will be stored:
[source,js]
-----------------------------------
PUT _snapshot/my_src_only_repository
{
"type": "source",
"settings": {
"delegate_type": "fs",
"location": "my_backup_location"
}
}
-----------------------------------
// CONSOLE
// TEST[continued]
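Once a snapshot from this repository has been restored, the data can be made searchable
again by reindexing. A minimal sketch, assuming the snapshot was restored as `restored_index`:

[source,js]
-----------------------------------
POST _reindex
{
  "source": { "index": "restored_index" },
  "dest": { "index": "searchable_index" }
}
-----------------------------------
// NOTCONSOLE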
[float]
===== Repository plugins

View File

@ -172,7 +172,7 @@ GET /_search
The example above creates a boolean query:
`(ny OR (new AND york)) city)`
`(ny OR (new AND york)) city`
that matches documents with the term `ny` or the conjunction `new AND york`.
By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.

View File

@ -13,6 +13,9 @@ Every context mapping has a unique name and a type. There are two types: `catego
and `geo`. Context mappings are configured under the `contexts` parameter in
the field mapping.
NOTE: It is mandatory to provide a context when indexing and querying
a context enabled completion field.
The following defines types, each with two context mappings for a completion
field:
@ -84,10 +87,6 @@ PUT place_path_category
NOTE: Adding context mappings increases the index size for completion field. The completion index
is entirely heap resident, you can monitor the completion field index size using <<indices-stats>>.
NOTE: deprecated[7.0.0, Indexing a suggestion without context on a context enabled completion field is deprecated
and will be removed in the next major release. If you want to index a suggestion that matches all contexts you should
add a special context for it.]
[[suggester-context-category]]
[float]
==== Category Context
@ -160,9 +159,9 @@ POST place/_search?pretty
// CONSOLE
// TEST[continued]
Note: deprecated[7.0.0, When no categories are provided at query-time, all indexed documents are considered.
Querying with no categories on a category enabled completion field is deprecated and will be removed in the next major release
as it degrades search performance considerably.]
NOTE: If multiple categories or category contexts are set on the query
they are merged as a disjunction. This means that suggestions match
if they contain at least one of the provided context values.
Suggestions with certain categories can be boosted higher than others.
The following filters suggestions by categories and additionally boosts
@ -218,6 +217,9 @@ multiple category context clauses. The following parameters are supported for a
so on, by specifying a category prefix of 'type'.
Defaults to `false`
NOTE: If a suggestion entry matches multiple contexts the final score is computed as the
maximum score produced by any matching contexts.
[[suggester-context-geo]]
[float]
==== Geo location Context
@ -307,6 +309,10 @@ POST place/_search
NOTE: When a location with a lower precision at query time is specified, all suggestions
that fall within the area will be considered.
NOTE: If multiple categories or category contexts are set on the query
they are merged as a disjunction. This means that suggestions match
if they contain at least one of the provided context values.
Suggestions that are within an area represented by a geohash can also be boosted higher
than others, as shown by the following:
@ -349,6 +355,9 @@ POST place/_search?pretty
that fall under the geohash representation of '(43.6624803, -79.3863353)'
with a default precision of '6' by a factor of `2`
NOTE: If a suggestion entry matches multiple contexts the final score is computed as the
maximum score produced by any matching contexts.
In addition to accepting context values, a context query can be composed of
multiple context clauses. The following parameters are supported for a
`category` context clause:

View File

@ -29,17 +29,17 @@ information, see <<security-settings>>.
For more information about encrypting communications across the Elastic Stack,
see {xpack-ref}/encrypting-communications.html[Encrypting Communications].
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/node-certificates.asciidoc
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/node-certificates.asciidoc
include::node-certificates.asciidoc[]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-transport.asciidoc
include::tls-transport.asciidoc[]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-http.asciidoc
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-http.asciidoc
include::tls-http.asciidoc[]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-ad.asciidoc
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-ad.asciidoc
include::tls-ad.asciidoc[]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/securing-communications/tls-ldap.asciidoc
include::tls-ldap.asciidoc[]

View File

@ -37,7 +37,7 @@ transport.profiles.client.bind_host: 1.1.1.1 <2>
<2> The bind address for the network used for client communication
If separate networks are not available, then
{xpack-ref}/ip-filtering.html[IP Filtering] can
{stack-ov}/ip-filtering.html[IP Filtering] can
be enabled to limit access to the profiles.
When using SSL for transport, a different set of certificates can also be used
@ -65,4 +65,4 @@ transport.profiles.client.xpack.security.ssl.client_authentication: none
This setting keeps certificate authentication active for node-to-node traffic,
but removes the requirement to distribute a signed certificate to transport
clients. For more information, see
{xpack-ref}/java-clients.html#transport-client[Configuring the Transport Client to work with a Secured Cluster].
{stack-ov}/java-clients.html#transport-client[Configuring the Transport Client to work with a Secured Cluster].

View File

@ -295,8 +295,9 @@ as _properties_ within Windows Installer documentation) that can be passed to `m
`SKIPSETTINGPASSWORDS`::
When installing with a `Trial` license and X-Pack Security enabled, whether the
installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`.
When installing with a `Trial` license and {security} enabled, whether the
installation should skip setting up the built-in users `elastic`, `kibana`,
`logstash_system`, `apm_system`, and `beats_system`.
Defaults to `false`
`ELASTICUSERPASSWORD`::

View File

@ -0,0 +1,57 @@
[role="xpack"]
[testenv="platinum"]
[[sql-client-apps-dbeaver]]
=== DBeaver
[quote, https://dbeaver.io/]
____
https://dbeaver.io/[DBeaver] is a free and open source universal database tool for developers and database administrators.
____
==== Prerequisites
* DBeaver version 5.1.4 or higher
* {es-sql} <<sql-jdbc, JDBC driver>>
==== New Connection
Create a new connection either through *File* > *New* > *Database Connection* or directly through the *Database Connection* panel.
image:images/sql/client-apps/dbeaver-1-new-conn.png[]
==== Select {es} type
Select the {es} type from the available connection types:
image:images/sql/client-apps/dbeaver-2-conn-es.png[]
==== Specify the {es} cluster information
Configure the {es-sql} connection appropriately:
image:images/sql/client-apps/dbeaver-3-conn-props.png[]
==== Verify the driver version
Make sure the correct JDBC driver version is used by using the *Edit Driver Settings* button:
image:images/sql/client-apps/dbeaver-4-driver-ver.png[]
DBeaver is aware of the {es} JDBC Maven repository, so simply *Download/Update* the artifact or add a new one. Alternatively, one can add a local file instead if the {es} Maven repository is not an option.
When changing the driver, make sure to click the *Find Class* button at the bottom. The driver class should be picked up automatically; however, this provides a sanity check that the driver jar is properly found and is not corrupt.
==== Test connectivity
Once the driver version and the settings are in place, use *Test Connection* to check that everything works. If things are okay, one should get a confirmation window with the version of the driver and that of {es-sql}:
image:images/sql/client-apps/dbeaver-5-test-conn.png[]
Click *Finish* and the new {es} connection appears in the *Database Connection* panel.
DBeaver is now configured to talk to {es}.
==== Connect to {es}
Simply click on the {es} connection and start querying and exploring {es}:
image:images/sql/client-apps/dbeaver-6-data.png[]

View File

@ -0,0 +1,42 @@
[role="xpack"]
[testenv="platinum"]
[[sql-client-apps-dbvis]]
=== DbVisualizer
[quote, http://www.dbvis.com/]
____
https://www.dbvis.com/[DbVisualizer] is a database management and analysis tool for all major databases.
____
==== Prerequisites
* {es-sql} <<sql-jdbc, JDBC driver>>
==== Add {es} JDBC driver
Add the {es} JDBC driver to DbVisualizer through *Tools* > *Driver Manager*:
image:images/sql/client-apps/dbvis-1-driver-manager.png[]
Create a new driver entry through *Driver* > *Create Driver* and add the JDBC driver in the files panel
through the buttons on the right. Once specified, the driver class and its version should be picked up automatically; one can force a refresh through the *Find driver in listed locations* button, the second from the bottom on the right-hand side:
image:images/sql/client-apps/dbvis-2-driver.png[]
==== Create a new connection
Once the {es} driver is in place, create a new connection:
image:images/sql/client-apps/dbvis-3-new-conn.png[]
One can use the wizard or add the settings all at once:
image:images/sql/client-apps/dbvis-4-conn-props.png[]
Press *Connect* and the driver version (as well as that of the cluster) should show up under *Connection Message*.
==== Execute SQL queries
The setup is done. DbVisualizer can be used to run queries against {es} and explore its content:
image:images/sql/client-apps/dbvis-5-data.png[]

View File

@ -0,0 +1,21 @@
[role="xpack"]
[testenv="platinum"]
[[sql-client-apps]]
== SQL Client Applications
Thanks to its <<sql-jdbc, JDBC>> interface, a broad range of third-party applications can use {es}'s SQL capabilities.
This section lists, in alphabetical order, a number of them and their respective configuration. The list, however, is by no means comprehensive (feel free to https://www.elastic.co/blog/art-of-pull-request[submit a PR] to improve it):
as long as the app can use the {es-sql} driver, it can use {es-sql}.
* <<sql-client-apps-dbeaver, DBeaver>>
* <<sql-client-apps-dbvis, DbVisualizer>>
* <<sql-client-apps-squirrel, SQuirreL SQL>>
* <<sql-client-apps-workbench, SQL Workbench>>
NOTE: Each application has its own requirements and license; these are outside the scope of this documentation,
which covers only the configuration aspect with {es-sql}.
include::dbeaver.asciidoc[]
include::dbvis.asciidoc[]
include::squirrel.asciidoc[]
include::workbench.asciidoc[]

View File

@ -0,0 +1,50 @@
[role="xpack"]
[testenv="platinum"]
[[sql-client-apps-squirrel]]
=== SQuirreL SQL
[quote, http://squirrel-sql.sourceforge.net/]
____
http://squirrel-sql.sourceforge.net/[SQuirreL SQL] is a graphical, [multi-platform] Java program that will allow you to view the structure of a JDBC compliant database [...].
____
==== Prerequisites
* {es-sql} <<sql-jdbc, JDBC driver>>
==== Add {es} JDBC Driver
To add the {es} JDBC driver, use *Windows* > *View Drivers* menu (or Ctrl+Shift+D shortcut):
image:images/sql/client-apps/squirell-1-view-drivers.png[]
This opens up the `Drivers` panel on the left. Click on the `+` sign to create a new driver:
image:images/sql/client-apps/squirell-2-new-driver.png[]
Select the *Extra Class Path* tab and *Add* the JDBC jar. Click *List Drivers* to have the `Class Name` filled in
automatically, then name the connection:
image:images/sql/client-apps/squirell-3-add-driver.png[]
The driver should now appear in the list:
image:images/sql/client-apps/squirell-4-driver-list.png[]
==== Add an alias for {es}
Add a new connection, or in SQuirreL terminology an _alias_, using the new driver. To do so, select the *Aliases* panel on the left and click the `+` sign:
image:images/sql/client-apps/squirell-5-add-alias.png[]
Name the new alias and select the `Elasticsearch` driver previously added:
image:images/sql/client-apps/squirell-6-alias-props.png[]
The setup is complete. Double-check it by clicking *Test Connection*.
==== Execute SQL queries
The connection should open automatically (if it has been created before, simply click *Connect* in the *Alias* panel). SQuirreL SQL can now issue SQL commands to {es}:
image:images/sql/client-apps/squirell-7-data.png[]

View File

@ -0,0 +1,40 @@
[role="xpack"]
[testenv="platinum"]
[[sql-client-apps-workbench]]
=== SQL Workbench/J
[quote, https://www.sql-workbench.eu/]
____
https://www.sql-workbench.eu/[SQL Workbench/J] is a free, DBMS-independent, cross-platform SQL query tool.
____
==== Prerequisites
* {es-sql} <<sql-jdbc, JDBC driver>>
==== Add {es} JDBC driver
Add the {es} JDBC driver to SQL Workbench/J through *Manage Drivers* either from the main windows in the *File* menu or from the *Connect* window:
image:images/sql/client-apps/workbench-1-manage-drivers.png[]
Add a new entry to the list through the blank page button in the upper left corner. Add the JDBC jar, provide a name, and click the magnifier button to have the driver *Classname* picked up automatically:
image:images/sql/client-apps/workbench-2-add-driver.png[]
==== Create a new connection profile
With the driver configured, create a new connection profile through *File* > *Connect Window* (or Alt+C shortcut):
image:images/sql/client-apps/workbench-3-connection.png[]
Select the previously configured driver and set the URL of your cluster using the JDBC syntax.
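For example, `jdbc:es://localhost:9200` (a hypothetical local cluster) follows the {es-sql} JDBC URL syntax.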
Verify the connection through the *Test* button; a confirmation window should appear if everything is properly configured.
The setup is complete.
==== Execute SQL queries
SQL Workbench/J is ready to talk to {es} through SQL: click on the profile created to execute statements or explore the data:
image:images/sql/client-apps/workbench-4-data.png[]

View File

@ -2,3 +2,4 @@ include::rest.asciidoc[]
include::translate.asciidoc[]
include::cli.asciidoc[]
include::jdbc.asciidoc[]
include::client-apps/index.asciidoc[]

View File

@ -3,14 +3,20 @@
[[sql-jdbc]]
== SQL JDBC
Elasticsearch's SQL jdbc driver is a rich, fully featured JDBC driver for Elasticsearch.
{es}'s SQL jdbc driver is a rich, fully featured JDBC driver for {es}.
It is a Type 4 driver, meaning it is a platform independent, stand-alone, Direct to Database,
pure Java driver that converts JDBC calls to Elasticsearch SQL.
pure Java driver that converts JDBC calls to {es-sql}.
[[sql-jdbc-installation]]
[float]
=== Installation
The JDBC driver can be obtained either by downloading it from the https://www.elastic.co/downloads/jdbc-client[elastic.co] site or by using a http://maven.apache.org/[Maven]-compatible tool with the following dependency:
The JDBC driver can be obtained from:
Dedicated page::
https://www.elastic.co/downloads/jdbc-client[elastic.co] provides links, typically for manual downloads.
Maven dependency::
http://maven.apache.org/[Maven]-compatible tools can retrieve it automatically as a dependency:
["source","xml",subs="attributes"]
----

View File

@ -36,6 +36,8 @@ indices and return results in tabular format.
SQL and print tabular results.
<<sql-jdbc,JDBC>>::
A JDBC driver for {es}.
<<sql-client-apps,Client Applications>>::
Documentation for configuring various SQL/BI tools with {es-sql}.
<<sql-spec,SQL Language>>::
Overview of the {es-sql} language, such as supported data types, commands and
syntax.

View File

@ -6,9 +6,12 @@
.Synopsis
[source, sql]
----
DESCRIBE [table identifier<1>|[LIKE pattern<2>]]
DESCRIBE [table identifier<1> | [LIKE pattern<2>]]
----
<1> single table identifier or double quoted es multi index
<2> SQL LIKE pattern
or
[source, sql]
@ -16,6 +19,8 @@ or
DESC [table identifier<1>|[LIKE pattern<2>]]
----
<1> single table identifier or double quoted es multi index
<2> SQL LIKE pattern
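For example (`emp` is a hypothetical index):

[source, sql]
----
DESCRIBE emp;
----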
.Description

View File

@ -20,6 +20,7 @@ package org.elasticsearch.core.internal.io;
import java.io.Closeable;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
@ -36,6 +37,14 @@ import java.util.Map;
*/
public final class IOUtils {
/**
* UTF-8 charset string.
* <p>Where possible, use {@link StandardCharsets#UTF_8} instead,
* as using the String constant may slow things down.
* @see StandardCharsets#UTF_8
*/
public static final String UTF_8 = StandardCharsets.UTF_8.name();
private IOUtils() {
// Static utils methods
}

View File

@ -19,6 +19,13 @@
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.AttributeSource;
import org.elasticsearch.script.ScriptContext;
/**
@ -30,21 +37,40 @@ public abstract class AnalysisPredicateScript {
* Encapsulation of the state of the current token
*/
public static class Token {
public CharSequence term;
public int pos;
public int posInc;
public int posLen;
public int startOffset;
public int endOffset;
public String type;
public boolean isKeyword;
private final CharTermAttribute termAtt;
private final PositionIncrementAttribute posIncAtt;
private final PositionLengthAttribute posLenAtt;
private final OffsetAttribute offsetAtt;
private final TypeAttribute typeAtt;
private final KeywordAttribute keywordAtt;
// posInc is always 1 at the beginning of a tokenstream and the convention
// from the _analyze endpoint is that tokenstream positions are 0-based
private int pos = -1;
/**
* Create a token exposing values from an AttributeSource
*/
public Token(AttributeSource source) {
this.termAtt = source.addAttribute(CharTermAttribute.class);
this.posIncAtt = source.addAttribute(PositionIncrementAttribute.class);
this.posLenAtt = source.addAttribute(PositionLengthAttribute.class);
this.offsetAtt = source.addAttribute(OffsetAttribute.class);
this.typeAtt = source.addAttribute(TypeAttribute.class);
this.keywordAtt = source.addAttribute(KeywordAttribute.class);
}
public void updatePosition() {
this.pos = this.pos + posIncAtt.getPositionIncrement();
}
public CharSequence getTerm() {
return term;
return termAtt;
}
public int getPositionIncrement() {
return posInc;
return posIncAtt.getPositionIncrement();
}
public int getPosition() {
@ -52,23 +78,23 @@ public abstract class AnalysisPredicateScript {
}
public int getPositionLength() {
return posLen;
return posLenAtt.getPositionLength();
}
public int getStartOffset() {
return startOffset;
return offsetAtt.startOffset();
}
public int getEndOffset() {
return endOffset;
return offsetAtt.endOffset();
}
public String getType() {
return type;
return typeAtt.type();
}
public boolean isKeyword() {
return isKeyword;
return keywordAtt.isKeyword();
}
}

View File

@ -264,6 +264,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, Scri
filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new));
filters.put("persian_normalization", PersianNormalizationFilterFactory::new);
filters.put("porter_stem", PorterStemTokenFilterFactory::new);
filters.put("predicate_token_filter",
requiresAnalysisSettings((i, e, n, s) -> new PredicateTokenFilterScriptFactory(i, n, s, scriptService.get())));
filters.put("remove_duplicates", RemoveDuplicatesTokenFilterFactory::new);
filters.put("reverse", ReverseTokenFilterFactory::new);
filters.put("russian_stem", RussianStemTokenFilterFactory::new);

View File

@ -0,0 +1,73 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.FilteringTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import java.io.IOException;
/**
* A factory for creating FilteringTokenFilters that determine whether or not to
* accept their underlying token by consulting a script
*/
public class PredicateTokenFilterScriptFactory extends AbstractTokenFilterFactory {
private final AnalysisPredicateScript.Factory factory;
public PredicateTokenFilterScriptFactory(IndexSettings indexSettings, String name, Settings settings, ScriptService scriptService) {
super(indexSettings, name, settings);
Settings scriptSettings = settings.getAsSettings("script");
Script script = Script.parse(scriptSettings);
if (script.getType() != ScriptType.INLINE) {
throw new IllegalArgumentException("Cannot use stored scripts in tokenfilter [" + name + "]");
}
this.factory = scriptService.compile(script, AnalysisPredicateScript.CONTEXT);
}
@Override
public TokenStream create(TokenStream tokenStream) {
return new ScriptFilteringTokenFilter(tokenStream, factory.newInstance());
}
private static class ScriptFilteringTokenFilter extends FilteringTokenFilter {
final AnalysisPredicateScript script;
final AnalysisPredicateScript.Token token;
ScriptFilteringTokenFilter(TokenStream in, AnalysisPredicateScript script) {
super(in);
this.script = script;
this.token = new AnalysisPredicateScript.Token(this);
}
@Override
protected boolean accept() throws IOException {
token.updatePosition();
return script.execute(token);
}
}
}

View File

@ -21,12 +21,6 @@ package org.elasticsearch.analysis.common;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
@ -36,6 +30,7 @@ import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -76,30 +71,26 @@ public class ScriptedConditionTokenFilterFactory extends AbstractTokenFilterFact
}
return in;
};
AnalysisPredicateScript script = factory.newInstance();
final AnalysisPredicateScript.Token token = new AnalysisPredicateScript.Token();
return new ConditionalTokenFilter(tokenStream, filter) {
return new ScriptedConditionTokenFilter(tokenStream, filter, factory.newInstance());
}
CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class);
OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);
private static class ScriptedConditionTokenFilter extends ConditionalTokenFilter {
@Override
protected boolean shouldFilter() {
token.term = termAtt;
token.posInc = posIncAtt.getPositionIncrement();
token.pos += token.posInc;
token.posLen = posLenAtt.getPositionLength();
token.startOffset = offsetAtt.startOffset();
token.endOffset = offsetAtt.endOffset();
token.type = typeAtt.type();
token.isKeyword = keywordAtt.isKeyword();
return script.execute(token);
}
};
private final AnalysisPredicateScript script;
private final AnalysisPredicateScript.Token token;
ScriptedConditionTokenFilter(TokenStream input, Function<TokenStream, TokenStream> inputFactory,
AnalysisPredicateScript script) {
super(input, inputFactory);
this.script = script;
this.token = new AnalysisPredicateScript.Token(this);
}
@Override
protected boolean shouldFilter() throws IOException {
token.updatePosition();
return script.execute(token);
}
}
@Override

View File

@ -0,0 +1,89 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.analysis.common;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import java.io.IOException;
import java.util.Collections;
public class PredicateTokenScriptFilterTests extends ESTokenStreamTestCase {
public void testSimpleFilter() throws IOException {
Settings settings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put("index.analysis.filter.f.type", "predicate_token_filter")
.put("index.analysis.filter.f.script.source", "token.getTerm().length() > 5")
.put("index.analysis.analyzer.myAnalyzer.type", "custom")
.put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard")
.putList("index.analysis.analyzer.myAnalyzer.filter", "f")
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);
AnalysisPredicateScript.Factory factory = () -> new AnalysisPredicateScript() {
@Override
public boolean execute(Token token) {
return token.getTerm().length() > 5;
}
};
@SuppressWarnings("unchecked")
ScriptService scriptService = new ScriptService(indexSettings, Collections.emptyMap(), Collections.emptyMap()){
@Override
public <FactoryType> FactoryType compile(Script script, ScriptContext<FactoryType> context) {
assertEquals(context, AnalysisPredicateScript.CONTEXT);
assertEquals(new Script("token.getTerm().length() > 5"), script);
return (FactoryType) factory;
}
};
CommonAnalysisPlugin plugin = new CommonAnalysisPlugin();
plugin.createComponents(null, null, null, null, scriptService, null, null, null, null);
AnalysisModule module
= new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(plugin));
IndexAnalyzers analyzers = module.getAnalysisRegistry().build(idxSettings);
try (NamedAnalyzer analyzer = analyzers.get("myAnalyzer")) {
assertNotNull(analyzer);
assertAnalyzesTo(analyzer, "Vorsprung Durch Technik", new String[]{
"Vorsprung", "Technik"
});
}
}
}

View File

@ -28,9 +28,44 @@
- type: condition
filter: [ "lowercase" ]
script:
source: "token.position > 1 && token.positionIncrement > 0 && token.startOffset > 0 && token.endOffset > 0 && (token.positionLength == 1 || token.type == \"a\" || token.keyword)"
source: "token.position >= 1 && token.positionIncrement > 0 && token.startOffset > 0 && token.endOffset > 0 && (token.positionLength == 1 || token.type == \"a\" || token.keyword)"
- length: { tokens: 3 }
- match: { tokens.0.token: "Vorsprung" }
- match: { tokens.1.token: "durch" }
- match: { tokens.2.token: "technik" }
---
"script_filter":
- do:
indices.analyze:
body:
text: "Vorsprung Durch Technik"
tokenizer: "whitespace"
filter:
- type: predicate_token_filter
script:
source: "token.term.length() > 5"
- length: { tokens: 2 }
- match: { tokens.0.token: "Vorsprung" }
- match: { tokens.1.token: "Technik" }
---
"script_filter_position":
- do:
indices.analyze:
body:
text: "a b c d e f g h"
tokenizer: "whitespace"
filter:
- type: predicate_token_filter
script:
source: "token.position >= 4"
- length: { tokens: 4 }
- match: { tokens.0.token: "e" }
- match: { tokens.1.token: "f" }
- match: { tokens.2.token: "g" }
- match: { tokens.3.token: "h" }

View File

@ -26,6 +26,7 @@ integTestCluster {
module project.project(':modules:mapper-extras')
systemProperty 'es.scripting.use_java_time', 'true'
systemProperty 'es.scripting.update.ctx_in_params', 'false'
systemProperty 'es.http.cname_in_publish_address', 'true'
}
dependencies {

View File

@ -183,7 +183,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
/**
* Returns the minimum number of children that are required to match for the parent to be considered a match.
* The default is {@value #DEFAULT_MAX_CHILDREN}
* The default is {@value #DEFAULT_MIN_CHILDREN}
*/
public int minChildren() {
return minChildren;
@ -191,7 +191,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
/**
* Returns the maximum number of children that are required to match for the parent to be considered a match.
* The default is {@value #DEFAULT_MIN_CHILDREN}
* The default is {@value #DEFAULT_MAX_CHILDREN}
*/
public int maxChildren() { return maxChildren; }

View File

@ -32,19 +32,23 @@ esplugin {
}
versions << [
'aws': '1.11.223'
'aws': '1.11.406'
]
dependencies {
compile "com.amazonaws:aws-java-sdk-s3:${versions.aws}"
compile "com.amazonaws:aws-java-sdk-kms:${versions.aws}"
compile "com.amazonaws:aws-java-sdk-core:${versions.aws}"
compile "com.amazonaws:jmespath-java:${versions.aws}"
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
compile "commons-logging:commons-logging:${versions.commonslogging}"
compile "commons-codec:commons-codec:${versions.commonscodec}"
compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1'
compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0'
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
compile 'joda-time:joda-time:2.10'
// HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here,
// and whitelist this hack in JarHell
@ -53,6 +57,7 @@ dependencies {
dependencyLicenses {
mapping from: /aws-java-sdk-.*/, to: 'aws-java-sdk'
mapping from: /jmespath-java.*/, to: 'aws-java-sdk'
mapping from: /jackson-.*/, to: 'jackson'
mapping from: /jaxb-.*/, to: 'jaxb'
}

View File

@ -1 +0,0 @@
c3993cb44f5856fa721b7b7ccfc266377c0bf9c0

View File

@ -0,0 +1 @@
43f3b7332d4d527bbf34d4ac6be094f3dabec6de

View File

@ -1 +0,0 @@
c24e6ebe108c60a08098aeaad5ae0b6a5a77b618

View File

@ -0,0 +1 @@
e29854e58dc20f5453c1da7e580a5921b1e9714a

View File

@ -1 +0,0 @@
c2ef96732e22d97952fbcd0a94f1dc376d157eda

View File

@ -0,0 +1 @@
5c3c2c57b076602b3aeef841c63e5848ec52b00d

Some files were not shown because too many files have changed in this diff.