Merge branch 'master' into index-lifecycle

Colin Goodheart-Smithe committed 2018-09-13 09:46:14 +01:00
commit 8e59de3eb2
289 changed files with 11262 additions and 3482 deletions


@@ -831,6 +831,9 @@ class BuildPlugin implements Plugin<Project> {
             // TODO: remove this once ctx isn't added to update script params in 7.0
             systemProperty 'es.scripting.update.ctx_in_params', 'false'
+            // TODO: remove this once the cname is prepended to the address by default in 7.0
+            systemProperty 'es.http.cname_in_publish_address', 'true'
             // Set the system keystore/truststore password if we're running tests in a FIPS-140 JVM
             if (project.inFipsJvm) {
                 systemProperty 'javax.net.ssl.trustStorePassword', 'password'
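
For context, `systemProperty` here passes a JVM system property into the forked test JVMs. A minimal, hypothetical Java sketch of how code running in such a JVM could read the new flag (only the property name comes from the diff; the reader class and default are illustrative assumptions):

public class CnameFlagSketch {
    public static void main(String[] args) {
        // "es.http.cname_in_publish_address" is the property set by the Gradle change above;
        // the fallback default of "false" is an assumption for illustration.
        boolean cnameInPublishAddress = Boolean.parseBoolean(
                System.getProperty("es.http.cname_in_publish_address", "false"));
        System.out.println("cname_in_publish_address = " + cnameInPublishAddress);
    }
}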


@@ -16,7 +16,7 @@ slf4j = 1.6.2
 jna = 4.5.1

 # test dependencies
-randomizedrunner = 2.5.2
+randomizedrunner = 2.7.0
 junit = 4.12
 httpclient = 4.5.2
 # When updating httpcore, please also update server/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy


@@ -28,10 +28,12 @@ import org.apache.http.entity.ByteArrayEntity;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.client.RequestConverters.EndpointBuilder;
 import org.elasticsearch.client.ml.CloseJobRequest;
+import org.elasticsearch.client.ml.DeleteForecastRequest;
 import org.elasticsearch.client.ml.DeleteJobRequest;
 import org.elasticsearch.client.ml.FlushJobRequest;
 import org.elasticsearch.client.ml.ForecastJobRequest;
 import org.elasticsearch.client.ml.GetBucketsRequest;
+import org.elasticsearch.client.ml.GetCategoriesRequest;
 import org.elasticsearch.client.ml.GetInfluencersRequest;
 import org.elasticsearch.client.ml.GetJobRequest;
 import org.elasticsearch.client.ml.GetJobStatsRequest;
@@ -39,6 +41,7 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest;
 import org.elasticsearch.client.ml.GetRecordsRequest;
 import org.elasticsearch.client.ml.OpenJobRequest;
 import org.elasticsearch.client.ml.PostDataRequest;
+import org.elasticsearch.client.ml.PutDatafeedRequest;
 import org.elasticsearch.client.ml.PutJobRequest;
 import org.elasticsearch.client.ml.UpdateJobRequest;
 import org.elasticsearch.common.Strings;
@@ -180,6 +183,38 @@ final class MLRequestConverters {
         return request;
     }

+    static Request putDatafeed(PutDatafeedRequest putDatafeedRequest) throws IOException {
+        String endpoint = new EndpointBuilder()
+                .addPathPartAsIs("_xpack")
+                .addPathPartAsIs("ml")
+                .addPathPartAsIs("datafeeds")
+                .addPathPart(putDatafeedRequest.getDatafeed().getId())
+                .build();
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
+        request.setEntity(createEntity(putDatafeedRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
+
+    static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) throws IOException {
+        String endpoint = new EndpointBuilder()
+                .addPathPartAsIs("_xpack")
+                .addPathPartAsIs("ml")
+                .addPathPartAsIs("anomaly_detectors")
+                .addPathPart(deleteForecastRequest.getJobId())
+                .addPathPartAsIs("_forecast")
+                .addPathPart(Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds()))
+                .build();
+        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
+        RequestConverters.Params params = new RequestConverters.Params(request);
+        if (deleteForecastRequest.isAllowNoForecasts() != null) {
+            params.putParam("allow_no_forecasts", Boolean.toString(deleteForecastRequest.isAllowNoForecasts()));
+        }
+        if (deleteForecastRequest.timeout() != null) {
+            params.putParam("timeout", deleteForecastRequest.timeout().getStringRep());
+        }
+        return request;
+    }
+
     static Request getBuckets(GetBucketsRequest getBucketsRequest) throws IOException {
         String endpoint = new EndpointBuilder()
                 .addPathPartAsIs("_xpack")
@@ -194,6 +229,20 @@ final class MLRequestConverters {
         return request;
     }

+    static Request getCategories(GetCategoriesRequest getCategoriesRequest) throws IOException {
+        String endpoint = new EndpointBuilder()
+                .addPathPartAsIs("_xpack")
+                .addPathPartAsIs("ml")
+                .addPathPartAsIs("anomaly_detectors")
+                .addPathPart(getCategoriesRequest.getJobId())
+                .addPathPartAsIs("results")
+                .addPathPartAsIs("categories")
+                .build();
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint);
+        request.setEntity(createEntity(getCategoriesRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
+    }
+
     static Request getOverallBuckets(GetOverallBucketsRequest getOverallBucketsRequest) throws IOException {
         String endpoint = new EndpointBuilder()
                 .addPathPartAsIs("_xpack")
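
The three new converters only assemble URLs and attach bodies and query parameters. A dependency-free sketch of the endpoints they should produce, given the path parts above (all IDs are hypothetical):

public class MlEndpointsSketch {
    public static void main(String[] args) {
        String jobId = "my-job";            // hypothetical job ID
        String datafeedId = "my-datafeed";  // hypothetical datafeed ID
        // deleteForecast joins the forecast IDs with commas, as Strings.collectionToCommaDelimitedString does above
        String forecastIds = String.join(",", "forecast-1", "forecast-2");

        System.out.println("PUT    /_xpack/ml/datafeeds/" + datafeedId);
        System.out.println("DELETE /_xpack/ml/anomaly_detectors/" + jobId + "/_forecast/" + forecastIds
                + "?allow_no_forecasts=true&timeout=30s"); // both params optional, per the null checks above
        System.out.println("GET    /_xpack/ml/anomaly_detectors/" + jobId + "/results/categories");
    }
}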


@@ -19,19 +19,20 @@
 package org.elasticsearch.client;

 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.client.ml.ForecastJobRequest;
-import org.elasticsearch.client.ml.ForecastJobResponse;
-import org.elasticsearch.client.ml.PostDataRequest;
-import org.elasticsearch.client.ml.PostDataResponse;
-import org.elasticsearch.client.ml.UpdateJobRequest;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.ml.CloseJobRequest;
 import org.elasticsearch.client.ml.CloseJobResponse;
+import org.elasticsearch.client.ml.DeleteForecastRequest;
 import org.elasticsearch.client.ml.DeleteJobRequest;
 import org.elasticsearch.client.ml.DeleteJobResponse;
 import org.elasticsearch.client.ml.FlushJobRequest;
 import org.elasticsearch.client.ml.FlushJobResponse;
+import org.elasticsearch.client.ml.ForecastJobRequest;
+import org.elasticsearch.client.ml.ForecastJobResponse;
 import org.elasticsearch.client.ml.GetBucketsRequest;
 import org.elasticsearch.client.ml.GetBucketsResponse;
+import org.elasticsearch.client.ml.GetCategoriesRequest;
+import org.elasticsearch.client.ml.GetCategoriesResponse;
 import org.elasticsearch.client.ml.GetInfluencersRequest;
 import org.elasticsearch.client.ml.GetInfluencersResponse;
 import org.elasticsearch.client.ml.GetJobRequest;
@@ -44,13 +45,19 @@ import org.elasticsearch.client.ml.GetRecordsRequest;
 import org.elasticsearch.client.ml.GetRecordsResponse;
 import org.elasticsearch.client.ml.OpenJobRequest;
 import org.elasticsearch.client.ml.OpenJobResponse;
+import org.elasticsearch.client.ml.PostDataRequest;
+import org.elasticsearch.client.ml.PostDataResponse;
+import org.elasticsearch.client.ml.PutDatafeedRequest;
+import org.elasticsearch.client.ml.PutDatafeedResponse;
 import org.elasticsearch.client.ml.PutJobRequest;
 import org.elasticsearch.client.ml.PutJobResponse;
+import org.elasticsearch.client.ml.UpdateJobRequest;
 import org.elasticsearch.client.ml.job.stats.JobStats;

 import java.io.IOException;
 import java.util.Collections;

 /**
  * Machine Learning API client wrapper for the {@link RestHighLevelClient}
  *
@@ -387,6 +394,11 @@ public final class MachineLearningClient {

     /**
      * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job}
      *
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html">ML Update Job documentation</a>
+     * </p>
+     *
      * @param request the {@link UpdateJobRequest} object enclosing the desired updates
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @return a PutJobResponse object containing the updated job object
@@ -425,6 +437,10 @@ public final class MachineLearningClient {

     /**
      * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job} asynchronously
      *
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-update-job.html">ML Update Job documentation</a>
+     * </p>
      * @param request the {@link UpdateJobRequest} object enclosing the desired updates
      * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
      * @param listener Listener to be notified upon request completion
@@ -438,6 +454,86 @@ public final class MachineLearningClient {
             Collections.emptySet());
     }

+    /**
+     * Creates a new Machine Learning Datafeed
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html">ML PUT datafeed documentation</a>
+     *
+     * @param request The PutDatafeedRequest containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return PutDatafeedResponse with enclosed {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} object
+     * @throws IOException when there is a serialization issue sending the request or receiving the response
+     */
+    public PutDatafeedResponse putDatafeed(PutDatafeedRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request,
+                MLRequestConverters::putDatafeed,
+                options,
+                PutDatafeedResponse::fromXContent,
+                Collections.emptySet());
+    }
+
+    /**
+     * Creates a new Machine Learning Datafeed asynchronously and notifies the listener on completion
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-datafeed.html">ML PUT datafeed documentation</a>
+     *
+     * @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener Listener to be notified upon request completion
+     */
+    public void putDatafeedAsync(PutDatafeedRequest request, RequestOptions options, ActionListener<PutDatafeedResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request,
+                MLRequestConverters::putDatafeed,
+                options,
+                PutDatafeedResponse::fromXContent,
+                listener,
+                Collections.emptySet());
+    }
+
+    /**
+     * Deletes Machine Learning Job Forecasts
+     *
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html">ML Delete Forecast documentation</a>
+     * </p>
+     *
+     * @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecast IDs, and other options
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return an AcknowledgedResponse object indicating request success
+     * @throws IOException when there is a serialization issue sending the request or receiving the response
+     */
+    public AcknowledgedResponse deleteForecast(DeleteForecastRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request,
+                MLRequestConverters::deleteForecast,
+                options,
+                AcknowledgedResponse::fromXContent,
+                Collections.emptySet());
+    }
+
+    /**
+     * Deletes Machine Learning Job Forecasts asynchronously
+     *
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-delete-forecast.html">ML Delete Forecast documentation</a>
+     * </p>
+     *
+     * @param request the {@link DeleteForecastRequest} object enclosing the desired jobId, forecast IDs, and other options
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener Listener to be notified upon request completion
+     */
+    public void deleteForecastAsync(DeleteForecastRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request,
+                MLRequestConverters::deleteForecast,
+                options,
+                AcknowledgedResponse::fromXContent,
+                listener,
+                Collections.emptySet());
+    }

     /**
      * Gets the buckets for a Machine Learning Job.
      * <p>
@@ -474,6 +570,45 @@ public final class MachineLearningClient {
             Collections.emptySet());
     }

+    /**
+     * Gets the categories for a Machine Learning Job.
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html">
+     * ML GET categories documentation</a>
+     *
+     * @param request The request
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return {@link GetCategoriesResponse} object containing the requested categories
+     * @throws IOException when there is a serialization issue sending the request or receiving the response
+     */
+    public GetCategoriesResponse getCategories(GetCategoriesRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request,
+                MLRequestConverters::getCategories,
+                options,
+                GetCategoriesResponse::fromXContent,
+                Collections.emptySet());
+    }
+
+    /**
+     * Gets the categories for a Machine Learning Job, notifies the listener once the requested categories are retrieved.
+     * <p>
+     * For additional info
+     * see <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-category.html">
+     * ML GET categories documentation</a>
+     *
+     * @param request The request
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener Listener to be notified upon request completion
+     */
+    public void getCategoriesAsync(GetCategoriesRequest request, RequestOptions options, ActionListener<GetCategoriesResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request,
+                MLRequestConverters::getCategories,
+                options,
+                GetCategoriesResponse::fromXContent,
+                listener,
+                Collections.emptySet());
+    }
+
     /**
      * Gets overall buckets for a set of Machine Learning Jobs.
      * <p>
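
Putting the new surface together, a hedged end-to-end sketch using the synchronous variants (job, datafeed, and index names are hypothetical; DatafeedConfig.builder and setIndices come from the datafeed diff further down):

import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetCategoriesResponse;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;

import java.io.IOException;

public class NewMlApisSketch {
    void demo(RestHighLevelClient client) throws IOException {
        // 1. Create a datafeed for an existing anomaly detection job.
        DatafeedConfig datafeed = DatafeedConfig.builder("my-datafeed", "my-job")
                .setIndices("my-index")
                .build();
        PutDatafeedResponse created =
                client.machineLearning().putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);

        // 2. Delete all forecasts the job has produced so far.
        AcknowledgedResponse ack = client.machineLearning()
                .deleteForecast(DeleteForecastRequest.deleteAllForecasts("my-job"), RequestOptions.DEFAULT);

        // 3. Fetch the job's categorization results.
        GetCategoriesResponse categories = client.machineLearning()
                .getCategories(new GetCategoriesRequest("my-job"), RequestOptions.DEFAULT);
    }
}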


@@ -0,0 +1,183 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
/**
* POJO for a delete forecast request
*/
public class DeleteForecastRequest extends ActionRequest implements ToXContentObject {
public static final ParseField FORECAST_ID = new ParseField("forecast_id");
public static final ParseField ALLOW_NO_FORECASTS = new ParseField("allow_no_forecasts");
public static final ParseField TIMEOUT = new ParseField("timeout");
public static final String ALL = "_all";
public static final ConstructingObjectParser<DeleteForecastRequest, Void> PARSER =
new ConstructingObjectParser<>("delete_forecast_request", (a) -> new DeleteForecastRequest((String) a[0]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
PARSER.declareStringOrNull(
(c, p) -> c.setForecastIds(Strings.commaDelimitedListToStringArray(p)), FORECAST_ID);
PARSER.declareBoolean(DeleteForecastRequest::setAllowNoForecasts, ALLOW_NO_FORECASTS);
PARSER.declareString(DeleteForecastRequest::timeout, TIMEOUT);
}
/**
* Create a new {@link DeleteForecastRequest} that explicitly deletes all forecasts
*
* @param jobId the jobId of the Job whose forecasts to delete
*/
public static DeleteForecastRequest deleteAllForecasts(String jobId) {
DeleteForecastRequest request = new DeleteForecastRequest(jobId);
request.setForecastIds(ALL);
return request;
}
private final String jobId;
private List<String> forecastIds = new ArrayList<>();
private Boolean allowNoForecasts;
private TimeValue timeout;
/**
* Create a new DeleteForecastRequest for the given Job ID
*
* @param jobId the jobId of the Job whose forecast(s) to delete
*/
public DeleteForecastRequest(String jobId) {
this.jobId = Objects.requireNonNull(jobId, Job.ID.getPreferredName());
}
public String getJobId() {
return jobId;
}
public List<String> getForecastIds() {
return forecastIds;
}
/**
* The forecast IDs to delete. Can also be {@link DeleteForecastRequest#ALL} to explicitly delete ALL forecasts
*
* @param forecastIds forecast IDs to delete
*/
public void setForecastIds(String... forecastIds) {
setForecastIds(Arrays.asList(forecastIds));
}
void setForecastIds(List<String> forecastIds) {
if (forecastIds.stream().anyMatch(Objects::isNull)) {
throw new NullPointerException("forecastIds must not contain null values");
}
this.forecastIds = new ArrayList<>(forecastIds);
}
public Boolean isAllowNoForecasts() {
return allowNoForecasts;
}
/**
* Sets the `allow_no_forecasts` field.
*
* @param allowNoForecasts when {@code true} no error is thrown when {@link DeleteForecastRequest#ALL} does not find any forecasts
*/
public void setAllowNoForecasts(boolean allowNoForecasts) {
this.allowNoForecasts = allowNoForecasts;
}
/**
* Sets the timeout
* @param timeout timeout as a string (e.g. 1s)
*/
public void timeout(String timeout) {
this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout");
}
/**
* Sets the timeout
* @param timeout timeout as a {@link TimeValue}
*/
public void timeout(TimeValue timeout) {
this.timeout = timeout;
}
public TimeValue timeout() {
return timeout;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
DeleteForecastRequest that = (DeleteForecastRequest) other;
return Objects.equals(jobId, that.jobId) &&
Objects.equals(forecastIds, that.forecastIds) &&
Objects.equals(allowNoForecasts, that.allowNoForecasts) &&
Objects.equals(timeout, that.timeout);
}
@Override
public int hashCode() {
return Objects.hash(jobId, forecastIds, allowNoForecasts, timeout);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (forecastIds != null) {
builder.field(FORECAST_ID.getPreferredName(), Strings.collectionToCommaDelimitedString(forecastIds));
}
if (allowNoForecasts != null) {
builder.field(ALLOW_NO_FORECASTS.getPreferredName(), allowNoForecasts);
}
if (timeout != null) {
builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep());
}
builder.endObject();
return builder;
}
}
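
A hedged usage sketch of the options this request exposes (job and forecast IDs are hypothetical):

import org.elasticsearch.client.ml.DeleteForecastRequest;

public class DeleteForecastRequestSketch {
    public static void main(String[] args) {
        // Target two specific forecasts of a job.
        DeleteForecastRequest specific = new DeleteForecastRequest("my-job");
        specific.setForecastIds("forecast-1", "forecast-2");
        specific.timeout("30s"); // parsed through TimeValue.parseTimeValue

        // Or explicitly delete every forecast, tolerating the case where none exist.
        DeleteForecastRequest all = DeleteForecastRequest.deleteAllForecasts("my-job");
        all.setAllowNoForecasts(true);
    }
}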


@@ -0,0 +1,128 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
/**
* A request to retrieve categories of a given job
*/
public class GetCategoriesRequest extends ActionRequest implements ToXContentObject {
public static final ParseField CATEGORY_ID = new ParseField("category_id");
public static final ConstructingObjectParser<GetCategoriesRequest, Void> PARSER = new ConstructingObjectParser<>(
"get_categories_request", a -> new GetCategoriesRequest((String) a[0]));
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
PARSER.declareLong(GetCategoriesRequest::setCategoryId, CATEGORY_ID);
PARSER.declareObject(GetCategoriesRequest::setPageParams, PageParams.PARSER, PageParams.PAGE);
}
private final String jobId;
private Long categoryId;
private PageParams pageParams;
/**
* Constructs a request to retrieve category information from a given job
* @param jobId id of the job from which to retrieve results
*/
public GetCategoriesRequest(String jobId) {
this.jobId = Objects.requireNonNull(jobId);
}
public String getJobId() {
return jobId;
}
public PageParams getPageParams() {
return pageParams;
}
public Long getCategoryId() {
return categoryId;
}
/**
* Sets the category id
* @param categoryId the category id
*/
public void setCategoryId(Long categoryId) {
this.categoryId = categoryId;
}
/**
* Sets the paging parameters
* @param pageParams the paging parameters
*/
public void setPageParams(PageParams pageParams) {
this.pageParams = pageParams;
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Job.ID.getPreferredName(), jobId);
if (categoryId != null) {
builder.field(CATEGORY_ID.getPreferredName(), categoryId);
}
if (pageParams != null) {
builder.field(PageParams.PAGE.getPreferredName(), pageParams);
}
builder.endObject();
return builder;
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
GetCategoriesRequest request = (GetCategoriesRequest) obj;
return Objects.equals(jobId, request.jobId)
&& Objects.equals(categoryId, request.categoryId)
&& Objects.equals(pageParams, request.pageParams);
}
@Override
public int hashCode() {
return Objects.hash(jobId, categoryId, pageParams);
}
}
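
A short sketch of the two ways to scope this request (the job ID is hypothetical; the PageParams(from, size) constructor is assumed from the parser declaration above):

import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.job.util.PageParams;

public class GetCategoriesRequestSketch {
    public static void main(String[] args) {
        // Ask for a single category by ID:
        GetCategoriesRequest single = new GetCategoriesRequest("my-job");
        single.setCategoryId(1L);

        // Or page through all of a job's categories, 100 at a time:
        GetCategoriesRequest paged = new GetCategoriesRequest("my-job");
        paged.setPageParams(new PageParams(0, 100));
    }
}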


@@ -0,0 +1,79 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.results.CategoryDefinition;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.List;
import java.util.Objects;
/**
* A response containing the requested categories
*/
public class GetCategoriesResponse extends AbstractResultResponse<CategoryDefinition> {
public static final ParseField CATEGORIES = new ParseField("categories");
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<GetCategoriesResponse, Void> PARSER =
new ConstructingObjectParser<>("get_categories_response", true,
a -> new GetCategoriesResponse((List<CategoryDefinition>) a[0], (long) a[1]));
static {
PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), CategoryDefinition.PARSER, CATEGORIES);
PARSER.declareLong(ConstructingObjectParser.constructorArg(), COUNT);
}
public static GetCategoriesResponse fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
GetCategoriesResponse(List<CategoryDefinition> categories, long count) {
super(CATEGORIES, categories, count);
}
/**
* The retrieved categories
* @return the retrieved categories
*/
public List<CategoryDefinition> categories() {
return results;
}
@Override
public int hashCode() {
return Objects.hash(count, results);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
GetCategoriesResponse other = (GetCategoriesResponse) obj;
return count == other.count && Objects.equals(results, other.results);
}
}
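
A hedged sketch of parsing this response from raw JSON via fromXContent; the sample payload and its CategoryDefinition fields are assumptions, not taken from the diff:

import org.elasticsearch.client.ml.GetCategoriesResponse;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;

public class GetCategoriesResponseSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical payload: one category belonging to "my-job".
        String json = "{\"count\":1,\"categories\":[{\"job_id\":\"my-job\",\"category_id\":1}]}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            GetCategoriesResponse response = GetCategoriesResponse.fromXContent(parser);
            System.out.println(response.count() + " categories: " + response.categories());
        }
    }
}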


@@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
/**
* Request to create a new Machine Learning Datafeed given a {@link DatafeedConfig} configuration
*/
public class PutDatafeedRequest extends ActionRequest implements ToXContentObject {
private final DatafeedConfig datafeed;
/**
* Construct a new PutDatafeedRequest
*
* @param datafeed a {@link DatafeedConfig} configuration to create
*/
public PutDatafeedRequest(DatafeedConfig datafeed) {
this.datafeed = datafeed;
}
public DatafeedConfig getDatafeed() {
return datafeed;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return datafeed.toXContent(builder, params);
}
@Override
public boolean equals(Object object) {
if (this == object) {
return true;
}
if (object == null || getClass() != object.getClass()) {
return false;
}
PutDatafeedRequest request = (PutDatafeedRequest) object;
return Objects.equals(datafeed, request.datafeed);
}
@Override
public int hashCode() {
return Objects.hash(datafeed);
}
@Override
public final String toString() {
return Strings.toString(this);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
}
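
A minimal sketch of wrapping a config in this request (IDs and index are hypothetical; the builder methods come from the DatafeedConfig diff below):

import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;

public class PutDatafeedRequestSketch {
    public static void main(String[] args) {
        DatafeedConfig datafeed = DatafeedConfig.builder("my-datafeed", "my-job")
                .setIndices("my-index")
                .build();
        PutDatafeedRequest request = new PutDatafeedRequest(datafeed);
        // toString() delegates to Strings.toString(this), i.e. the datafeed's JSON rendering.
        System.out.println(request);
    }
}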


@@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
/**
* Response containing the newly created {@link DatafeedConfig}
*/
public class PutDatafeedResponse implements ToXContentObject {
private DatafeedConfig datafeed;
public static PutDatafeedResponse fromXContent(XContentParser parser) throws IOException {
return new PutDatafeedResponse(DatafeedConfig.PARSER.parse(parser, null).build());
}
PutDatafeedResponse(DatafeedConfig datafeed) {
this.datafeed = datafeed;
}
public DatafeedConfig getResponse() {
return datafeed;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
datafeed.toXContent(builder, params);
return builder;
}
@Override
public boolean equals(Object object) {
if (this == object) {
return true;
}
if (object == null || getClass() != object.getClass()) {
return false;
}
PutDatafeedResponse response = (PutDatafeedResponse) object;
return Objects.equals(datafeed, response.datafeed);
}
@Override
public int hashCode() {
return Objects.hash(datafeed);
}
}


@@ -20,36 +20,37 @@ package org.elasticsearch.client.ml.datafeed;

 import org.elasticsearch.client.ml.job.config.Job;
 import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.query.AbstractQueryBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.builder.SearchSourceBuilder;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;

 /**
- * Datafeed configuration options pojo. Describes where to proactively pull input
- * data from.
- * <p>
- * If a value has not been set it will be <code>null</code>. Object wrappers are
- * used around integral types and booleans so they can take <code>null</code>
- * values.
+ * The datafeed configuration object. It specifies which indices
+ * to get the data from and offers parameters for customizing different
+ * aspects of the process.
  */
 public class DatafeedConfig implements ToXContentObject {

-    public static final int DEFAULT_SCROLL_SIZE = 1000;
-
     public static final ParseField ID = new ParseField("datafeed_id");
     public static final ParseField QUERY_DELAY = new ParseField("query_delay");
     public static final ParseField FREQUENCY = new ParseField("frequency");
@@ -59,7 +60,6 @@ public class DatafeedConfig implements ToXContentObject {
     public static final ParseField QUERY = new ParseField("query");
     public static final ParseField SCROLL_SIZE = new ParseField("scroll_size");
     public static final ParseField AGGREGATIONS = new ParseField("aggregations");
-    public static final ParseField AGGS = new ParseField("aggs");
     public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields");
     public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config");
@@ -77,9 +77,8 @@ public class DatafeedConfig implements ToXContentObject {
             builder.setQueryDelay(TimeValue.parseTimeValue(val, QUERY_DELAY.getPreferredName())), QUERY_DELAY);
         PARSER.declareString((builder, val) ->
             builder.setFrequency(TimeValue.parseTimeValue(val, FREQUENCY.getPreferredName())), FREQUENCY);
-        PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), QUERY);
-        PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGREGATIONS);
-        PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), AGGS);
+        PARSER.declareField(Builder::setQuery, DatafeedConfig::parseBytes, QUERY, ObjectParser.ValueType.OBJECT);
+        PARSER.declareField(Builder::setAggregations, DatafeedConfig::parseBytes, AGGREGATIONS, ObjectParser.ValueType.OBJECT);
         PARSER.declareObject(Builder::setScriptFields, (p, c) -> {
             List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
             while (p.nextToken() != XContentParser.Token.END_OBJECT) {
@@ -91,29 +90,26 @@ public class DatafeedConfig implements ToXContentObject {
         PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG);
     }

+    private static BytesReference parseBytes(XContentParser parser) throws IOException {
+        XContentBuilder contentBuilder = JsonXContent.contentBuilder();
+        contentBuilder.generator().copyCurrentStructure(parser);
+        return BytesReference.bytes(contentBuilder);
+    }
+
     private final String id;
     private final String jobId;
-    /**
-     * The delay before starting to query a period of time
-     */
     private final TimeValue queryDelay;
-    /**
-     * The frequency with which queries are executed
-     */
     private final TimeValue frequency;
     private final List<String> indices;
     private final List<String> types;
-    private final QueryBuilder query;
-    private final AggregatorFactories.Builder aggregations;
+    private final BytesReference query;
+    private final BytesReference aggregations;
     private final List<SearchSourceBuilder.ScriptField> scriptFields;
     private final Integer scrollSize;
     private final ChunkingConfig chunkingConfig;

     private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
-                           QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
+                           BytesReference query, BytesReference aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
                            Integer scrollSize, ChunkingConfig chunkingConfig) {
         this.id = id;
         this.jobId = jobId;
@@ -156,11 +152,11 @@ public class DatafeedConfig implements ToXContentObject {
         return scrollSize;
     }

-    public QueryBuilder getQuery() {
+    public BytesReference getQuery() {
         return query;
     }

-    public AggregatorFactories.Builder getAggregations() {
+    public BytesReference getAggregations() {
         return aggregations;
     }
@@ -183,11 +179,17 @@ public class DatafeedConfig implements ToXContentObject {
         if (frequency != null) {
             builder.field(FREQUENCY.getPreferredName(), frequency.getStringRep());
         }
-        builder.field(INDICES.getPreferredName(), indices);
-        builder.field(TYPES.getPreferredName(), types);
-        builder.field(QUERY.getPreferredName(), query);
+        if (indices != null) {
+            builder.field(INDICES.getPreferredName(), indices);
+        }
+        if (types != null) {
+            builder.field(TYPES.getPreferredName(), types);
+        }
+        if (query != null) {
+            builder.field(QUERY.getPreferredName(), asMap(query));
+        }
         if (aggregations != null) {
-            builder.field(AGGREGATIONS.getPreferredName(), aggregations);
+            builder.field(AGGREGATIONS.getPreferredName(), asMap(aggregations));
         }
         if (scriptFields != null) {
             builder.startObject(SCRIPT_FIELDS.getPreferredName());
@@ -196,7 +198,9 @@ public class DatafeedConfig implements ToXContentObject {
             }
             builder.endObject();
         }
-        builder.field(SCROLL_SIZE.getPreferredName(), scrollSize);
+        if (scrollSize != null) {
+            builder.field(SCROLL_SIZE.getPreferredName(), scrollSize);
+        }
         if (chunkingConfig != null) {
             builder.field(CHUNKING_CONFIG.getPreferredName(), chunkingConfig);
         }
@@ -205,10 +209,18 @@ public class DatafeedConfig implements ToXContentObject {
         return builder;
     }

+    private static Map<String, Object> asMap(BytesReference bytesReference) {
+        return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2();
+    }
+
     /**
      * The lists of indices and types are compared for equality but they are not
      * sorted first so this test could fail simply because the indices and types
      * lists are in different orders.
+     *
+     * Also note this could be a heavy operation when a query or aggregations
+     * are set as we need to convert the bytes references into maps to correctly
+     * compare them.
      */
     @Override
     public boolean equals(Object other) {
@@ -228,31 +240,40 @@ public class DatafeedConfig implements ToXContentObject {
             && Objects.equals(this.queryDelay, that.queryDelay)
             && Objects.equals(this.indices, that.indices)
             && Objects.equals(this.types, that.types)
-            && Objects.equals(this.query, that.query)
+            && Objects.equals(asMap(this.query), asMap(that.query))
             && Objects.equals(this.scrollSize, that.scrollSize)
-            && Objects.equals(this.aggregations, that.aggregations)
+            && Objects.equals(asMap(this.aggregations), asMap(that.aggregations))
             && Objects.equals(this.scriptFields, that.scriptFields)
             && Objects.equals(this.chunkingConfig, that.chunkingConfig);
     }

+    /**
+     * Note this could be a heavy operation when a query or aggregations
+     * are set as we need to convert the bytes references into maps to
+     * compute a stable hash code.
+     */
     @Override
     public int hashCode() {
-        return Objects.hash(id, jobId, frequency, queryDelay, indices, types, query, scrollSize, aggregations, scriptFields,
-            chunkingConfig);
+        return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields,
+            chunkingConfig);
     }

+    public static Builder builder(String id, String jobId) {
+        return new Builder(id, jobId);
+    }
+
     public static class Builder {

         private String id;
         private String jobId;
         private TimeValue queryDelay;
         private TimeValue frequency;
-        private List<String> indices = Collections.emptyList();
-        private List<String> types = Collections.emptyList();
-        private QueryBuilder query = QueryBuilders.matchAllQuery();
-        private AggregatorFactories.Builder aggregations;
+        private List<String> indices;
+        private List<String> types;
+        private BytesReference query;
+        private BytesReference aggregations;
         private List<SearchSourceBuilder.ScriptField> scriptFields;
-        private Integer scrollSize = DEFAULT_SCROLL_SIZE;
+        private Integer scrollSize;
         private ChunkingConfig chunkingConfig;

         public Builder(String id, String jobId) {
@@ -279,8 +300,12 @@ public class DatafeedConfig implements ToXContentObject {
             return this;
         }

+        public Builder setIndices(String... indices) {
+            return setIndices(Arrays.asList(indices));
+        }
+
         public Builder setTypes(List<String> types) {
-            this.types = Objects.requireNonNull(types, TYPES.getPreferredName());
+            this.types = types;
             return this;
         }
@@ -294,16 +319,36 @@ public class DatafeedConfig implements ToXContentObject {
             return this;
         }

-        public Builder setQuery(QueryBuilder query) {
-            this.query = Objects.requireNonNull(query, QUERY.getPreferredName());
+        private Builder setQuery(BytesReference query) {
+            this.query = query;
             return this;
         }

-        public Builder setAggregations(AggregatorFactories.Builder aggregations) {
+        public Builder setQuery(String queryAsJson) {
+            this.query = queryAsJson == null ? null : new BytesArray(queryAsJson);
+            return this;
+        }
+
+        public Builder setQuery(QueryBuilder query) throws IOException {
+            this.query = query == null ? null : xContentToBytes(query);
+            return this;
+        }
+
+        private Builder setAggregations(BytesReference aggregations) {
             this.aggregations = aggregations;
             return this;
         }

+        public Builder setAggregations(String aggsAsJson) {
+            this.aggregations = aggsAsJson == null ? null : new BytesArray(aggsAsJson);
+            return this;
+        }
+
+        public Builder setAggregations(AggregatorFactories.Builder aggregations) throws IOException {
+            this.aggregations = aggregations == null ? null : xContentToBytes(aggregations);
+            return this;
+        }
+
         public Builder setScriptFields(List<SearchSourceBuilder.ScriptField> scriptFields) {
             List<SearchSourceBuilder.ScriptField> sorted = new ArrayList<>(scriptFields);
             sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
@@ -325,5 +370,12 @@ public class DatafeedConfig implements ToXContentObject {
             return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize,
                 chunkingConfig);
         }
+
+        private static BytesReference xContentToBytes(ToXContentObject object) throws IOException {
+            try (XContentBuilder builder = JsonXContent.contentBuilder()) {
+                object.toXContent(builder, ToXContentObject.EMPTY_PARAMS);
+                return BytesReference.bytes(builder);
+            }
+        }
     }
 }
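
With query and aggregations now held as a BytesReference, callers can supply either raw JSON or a QueryBuilder that is serialized once on the way in. A hedged sketch of the two forms (IDs are hypothetical), which the map-based equals above should treat as equal:

import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.index.query.QueryBuilders;

import java.io.IOException;

public class DatafeedQueryFormsSketch {
    public static void main(String[] args) throws IOException {
        // Raw JSON form: stored verbatim as bytes, no query parsing at build time.
        DatafeedConfig fromJson = DatafeedConfig.builder("feed-1", "my-job")
                .setIndices("my-index")
                .setQuery("{\"match_all\":{\"boost\":1.0}}")
                .build();

        // QueryBuilder form: serialized to bytes via xContentToBytes.
        DatafeedConfig fromBuilder = DatafeedConfig.builder("feed-1", "my-job")
                .setIndices("my-index")
                .setQuery(QueryBuilders.matchAllQuery()) // serializes with its default boost of 1.0
                .build();

        // equals() compares map renderings of the bytes, so logically identical
        // JSON matches regardless of key order or formatting.
        System.out.println(fromJson.equals(fromBuilder)); // expected: true
    }
}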


@ -20,12 +20,17 @@ package org.elasticsearch.client.ml.datafeed;
import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder;
@ -35,6 +40,7 @@ import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Objects; import java.util.Objects;
/** /**
@ -58,11 +64,9 @@ public class DatafeedUpdate implements ToXContentObject {
TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY); TimeValue.parseTimeValue(val, DatafeedConfig.QUERY_DELAY.getPreferredName())), DatafeedConfig.QUERY_DELAY);
PARSER.declareString((builder, val) -> builder.setFrequency( PARSER.declareString((builder, val) -> builder.setFrequency(
TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY); TimeValue.parseTimeValue(val, DatafeedConfig.FREQUENCY.getPreferredName())), DatafeedConfig.FREQUENCY);
PARSER.declareObject(Builder::setQuery, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), DatafeedConfig.QUERY); PARSER.declareField(Builder::setQuery, DatafeedUpdate::parseBytes, DatafeedConfig.QUERY, ObjectParser.ValueType.OBJECT);
PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p), PARSER.declareField(Builder::setAggregations, DatafeedUpdate::parseBytes, DatafeedConfig.AGGREGATIONS,
DatafeedConfig.AGGREGATIONS); ObjectParser.ValueType.OBJECT);
PARSER.declareObject(Builder::setAggregations, (p, c) -> AggregatorFactories.parseAggregators(p),
DatafeedConfig.AGGS);
PARSER.declareObject(Builder::setScriptFields, (p, c) -> { PARSER.declareObject(Builder::setScriptFields, (p, c) -> {
List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>(); List<SearchSourceBuilder.ScriptField> parsedScriptFields = new ArrayList<>();
while (p.nextToken() != XContentParser.Token.END_OBJECT) { while (p.nextToken() != XContentParser.Token.END_OBJECT) {
@ -74,20 +78,26 @@ public class DatafeedUpdate implements ToXContentObject {
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG); PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, DatafeedConfig.CHUNKING_CONFIG);
} }
private static BytesReference parseBytes(XContentParser parser) throws IOException {
XContentBuilder contentBuilder = JsonXContent.contentBuilder();
contentBuilder.generator().copyCurrentStructure(parser);
return BytesReference.bytes(contentBuilder);
}
private final String id; private final String id;
private final String jobId; private final String jobId;
private final TimeValue queryDelay; private final TimeValue queryDelay;
private final TimeValue frequency; private final TimeValue frequency;
private final List<String> indices; private final List<String> indices;
private final List<String> types; private final List<String> types;
private final QueryBuilder query; private final BytesReference query;
private final AggregatorFactories.Builder aggregations; private final BytesReference aggregations;
private final List<SearchSourceBuilder.ScriptField> scriptFields; private final List<SearchSourceBuilder.ScriptField> scriptFields;
private final Integer scrollSize; private final Integer scrollSize;
private final ChunkingConfig chunkingConfig; private final ChunkingConfig chunkingConfig;
private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types, private DatafeedUpdate(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, List<String> types,
QueryBuilder query, AggregatorFactories.Builder aggregations, List<SearchSourceBuilder.ScriptField> scriptFields, BytesReference query, BytesReference aggregations, List<SearchSourceBuilder.ScriptField> scriptFields,
Integer scrollSize, ChunkingConfig chunkingConfig) { Integer scrollSize, ChunkingConfig chunkingConfig) {
this.id = id; this.id = id;
this.jobId = jobId; this.jobId = jobId;
@ -121,9 +131,13 @@ public class DatafeedUpdate implements ToXContentObject {
builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep()); builder.field(DatafeedConfig.FREQUENCY.getPreferredName(), frequency.getStringRep());
} }
addOptionalField(builder, DatafeedConfig.INDICES, indices); addOptionalField(builder, DatafeedConfig.INDICES, indices);
if (query != null) {
builder.field(DatafeedConfig.QUERY.getPreferredName(), asMap(query));
}
if (aggregations != null) {
builder.field(DatafeedConfig.AGGREGATIONS.getPreferredName(), asMap(aggregations));
}
addOptionalField(builder, DatafeedConfig.TYPES, types); addOptionalField(builder, DatafeedConfig.TYPES, types);
addOptionalField(builder, DatafeedConfig.QUERY, query);
addOptionalField(builder, DatafeedConfig.AGGREGATIONS, aggregations);
if (scriptFields != null) { if (scriptFields != null) {
builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName()); builder.startObject(DatafeedConfig.SCRIPT_FIELDS.getPreferredName());
for (SearchSourceBuilder.ScriptField scriptField : scriptFields) { for (SearchSourceBuilder.ScriptField scriptField : scriptFields) {
@ -167,11 +181,11 @@ public class DatafeedUpdate implements ToXContentObject {
return scrollSize; return scrollSize;
} }
public QueryBuilder getQuery() { public BytesReference getQuery() {
return query; return query;
} }
public AggregatorFactories.Builder getAggregations() { public BytesReference getAggregations() {
return aggregations; return aggregations;
} }
@ -183,10 +197,18 @@ public class DatafeedUpdate implements ToXContentObject {
return chunkingConfig; return chunkingConfig;
} }
private static Map<String, Object> asMap(BytesReference bytesReference) {
return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2();
}
/** /**
* The lists of indices and types are compared for equality but they are not * The lists of indices and types are compared for equality but they are not
* sorted first so this test could fail simply because the indices and types * sorted first so this test could fail simply because the indices and types
* lists are in different orders. * lists are in different orders.
*
* Also note this could be a heavy operation when a query or aggregations
* are set as we need to convert the bytes references into maps to correctly
* compare them.
*/ */
@Override @Override
public boolean equals(Object other) { public boolean equals(Object other) {
@@ -206,19 +228,28 @@ public class DatafeedUpdate implements ToXContentObject {
&& Objects.equals(this.queryDelay, that.queryDelay)
&& Objects.equals(this.indices, that.indices)
&& Objects.equals(this.types, that.types)
&& Objects.equals(asMap(this.query), asMap(that.query))
&& Objects.equals(this.scrollSize, that.scrollSize)
&& Objects.equals(asMap(this.aggregations), asMap(that.aggregations))
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig);
}
/**
* Note this could be a heavy operation when a query or aggregations
* are set as we need to convert the bytes references into maps to
* compute a stable hash code.
*/
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, types, asMap(query), scrollSize, asMap(aggregations), scriptFields,
chunkingConfig);
}
public static Builder builder(String id) {
return new Builder(id);
}
public static class Builder {
private String id;
@@ -227,8 +258,8 @@ public class DatafeedUpdate implements ToXContentObject {
private TimeValue frequency;
private List<String> indices;
private List<String> types;
private BytesReference query;
private BytesReference aggregations;
private List<SearchSourceBuilder.ScriptField> scriptFields;
private Integer scrollSize;
private ChunkingConfig chunkingConfig;
@@ -276,16 +307,36 @@ public class DatafeedUpdate implements ToXContentObject {
return this;
}
private Builder setQuery(BytesReference query) {
this.query = query;
return this;
}
public Builder setQuery(String queryAsJson) {
this.query = queryAsJson == null ? null : new BytesArray(queryAsJson);
return this;
}
public Builder setQuery(QueryBuilder query) throws IOException {
this.query = query == null ? null : xContentToBytes(query);
return this;
}
private Builder setAggregations(BytesReference aggregations) {
this.aggregations = aggregations;
return this;
}
public Builder setAggregations(String aggsAsJson) {
this.aggregations = aggsAsJson == null ? null : new BytesArray(aggsAsJson);
return this;
}
public Builder setAggregations(AggregatorFactories.Builder aggregations) throws IOException {
this.aggregations = aggregations == null ? null : xContentToBytes(aggregations);
return this;
}
public Builder setScriptFields(List<SearchSourceBuilder.ScriptField> scriptFields) {
List<SearchSourceBuilder.ScriptField> sorted = new ArrayList<>(scriptFields);
sorted.sort(Comparator.comparing(SearchSourceBuilder.ScriptField::fieldName));
@@ -307,5 +358,12 @@ public class DatafeedUpdate implements ToXContentObject {
return new DatafeedUpdate(id, jobId, queryDelay, frequency, indices, types, query, aggregations, scriptFields, scrollSize,
chunkingConfig);
}
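// Serializes any ToXContentObject (for example a QueryBuilder or an
// AggregatorFactories.Builder) into its JSON representation as bytes.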
private static BytesReference xContentToBytes(ToXContentObject object) throws IOException {
try (XContentBuilder builder = JsonXContent.contentBuilder()) {
object.toXContent(builder, ToXContentObject.EMPTY_PARAMS);
return BytesReference.bytes(builder);
}
}
}
}
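For illustration, a minimal sketch (not part of this commit) of how the new string-based overload might be called; the datafeed id and query JSON below are made-up values:

    DatafeedUpdate update = DatafeedUpdate.builder("my-datafeed")          // hypothetical datafeed id
            .setQuery("{\"term\": {\"airline\": {\"value\": \"AAL\"}}}")  // raw JSON, stored as bytes
            .build();

Holding the query and aggregations as opaque BytesReference values, rather than parsed builders, is what lets the client-side tests further down drop their SearchModule-backed NamedXContentRegistry overrides.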

View File

@@ -24,10 +24,12 @@ import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetJobRequest;
import org.elasticsearch.client.ml.GetJobStatsRequest;
@@ -35,14 +37,18 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest;
import org.elasticsearch.client.ml.GetRecordsRequest;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.config.JobUpdateTests;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
@@ -203,6 +209,47 @@ public class MLRequestConvertersTests extends ESTestCase {
}
}
public void testPutDatafeed() throws IOException {
DatafeedConfig datafeed = DatafeedConfigTests.createRandom();
PutDatafeedRequest putDatafeedRequest = new PutDatafeedRequest(datafeed);
Request request = MLRequestConverters.putDatafeed(putDatafeedRequest);
assertEquals(HttpPut.METHOD_NAME, request.getMethod());
assertThat(request.getEndpoint(), equalTo("/_xpack/ml/datafeeds/" + datafeed.getId()));
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
DatafeedConfig parsedDatafeed = DatafeedConfig.PARSER.apply(parser, null).build();
assertThat(parsedDatafeed, equalTo(datafeed));
}
}
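// For reference, a hedged sketch (an assumption, not shown in this diff) of the
// converter shape that testPutDatafeed above exercises: a PUT to
// /_xpack/ml/datafeeds/{datafeedId} with the datafeed config as the JSON body.
// The getDatafeed(), createEntity() and REQUEST_BODY_CONTENT_TYPE names follow
// RequestConverters conventions but are not confirmed by this diff.
//
//   static Request putDatafeed(PutDatafeedRequest putDatafeedRequest) throws IOException {
//       String endpoint = new EndpointBuilder()
//               .addPathPartAsIs("_xpack")
//               .addPathPartAsIs("ml")
//               .addPathPartAsIs("datafeeds")
//               .addPathPart(putDatafeedRequest.getDatafeed().getId())
//               .build();
//       Request request = new Request(HttpPut.METHOD_NAME, endpoint);
//       request.setEntity(createEntity(putDatafeedRequest, REQUEST_BODY_CONTENT_TYPE));
//       return request;
//   }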
public void testDeleteForecast() throws Exception {
String jobId = randomAlphaOfLength(10);
DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(jobId);
Request request = MLRequestConverters.deleteForecast(deleteForecastRequest);
assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_forecast", request.getEndpoint());
assertFalse(request.getParameters().containsKey("timeout"));
assertFalse(request.getParameters().containsKey("allow_no_forecasts"));
deleteForecastRequest.setForecastIds(randomAlphaOfLength(10), randomAlphaOfLength(10));
deleteForecastRequest.timeout("10s");
deleteForecastRequest.setAllowNoForecasts(true);
request = MLRequestConverters.deleteForecast(deleteForecastRequest);
assertEquals(
"/_xpack/ml/anomaly_detectors/" +
jobId +
"/_forecast/" +
Strings.collectionToCommaDelimitedString(deleteForecastRequest.getForecastIds()),
request.getEndpoint());
assertEquals("10s",
request.getParameters().get(DeleteForecastRequest.TIMEOUT.getPreferredName()));
assertEquals(Boolean.toString(true),
request.getParameters().get(DeleteForecastRequest.ALLOW_NO_FORECASTS.getPreferredName()));
}
public void testGetBuckets() throws IOException {
String jobId = randomAlphaOfLength(10);
GetBucketsRequest getBucketsRequest = new GetBucketsRequest(jobId);
@@ -220,6 +267,21 @@ public class MLRequestConvertersTests extends ESTestCase {
}
}
public void testGetCategories() throws IOException {
String jobId = randomAlphaOfLength(10);
GetCategoriesRequest getCategoriesRequest = new GetCategoriesRequest(jobId);
getCategoriesRequest.setPageParams(new PageParams(100, 300));
Request request = MLRequestConverters.getCategories(getCategoriesRequest);
assertEquals(HttpGet.METHOD_NAME, request.getMethod());
assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/results/categories", request.getEndpoint());
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
GetCategoriesRequest parsedRequest = GetCategoriesRequest.PARSER.apply(parser, null);
assertThat(parsedRequest, equalTo(getCategoriesRequest));
}
}
public void testGetOverallBuckets() throws IOException {
String jobId = randomAlphaOfLength(10);
GetOverallBucketsRequest getOverallBucketsRequest = new GetOverallBucketsRequest(jobId);

View File

@@ -23,6 +23,8 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetBucketsResponse;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetCategoriesResponse;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetInfluencersResponse;
import org.elasticsearch.client.ml.GetOverallBucketsRequest;
@@ -126,11 +128,150 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
bulkRequest.add(indexRequest);
}
private void addCategoryIndexRequest(long categoryId, String categoryName, BulkRequest bulkRequest) {
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX, DOC);
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"category_id\": " + categoryId + ", \"terms\": \"" +
categoryName + "\", \"regex\": \".*?" + categoryName + ".*\", \"max_matching_length\": 3, \"examples\": [\"" +
categoryName + "\"]}", XContentType.JSON);
bulkRequest.add(indexRequest);
}
private void addCategoriesIndexRequests(BulkRequest bulkRequest) {
List<String> categories = Arrays.asList("AAL", "JZA", "JBU");
for (int i = 0; i < categories.size(); i++) {
addCategoryIndexRequest(i+1, categories.get(i), bulkRequest);
}
}
@After
public void deleteJob() throws IOException {
new MlRestTestStateCleaner(logger, client()).clearMlMetadata();
}
public void testGetCategories() throws IOException {
// index some category results
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
addCategoriesIndexRequests(bulkRequest);
highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setPageParams(new PageParams(0, 10000));
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(3L));
assertThat(response.categories().size(), equalTo(3));
assertThat(response.categories().get(0).getCategoryId(), equalTo(1L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("AAL"));
assertThat(response.categories().get(1).getCategoryId(), equalTo(2L));
assertThat(response.categories().get(1).getGrokPattern(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(1).getRegex(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(1).getTerms(), equalTo("JZA"));
assertThat(response.categories().get(2).getCategoryId(), equalTo(3L));
assertThat(response.categories().get(2).getGrokPattern(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(2).getRegex(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(2).getTerms(), equalTo("JBU"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setPageParams(new PageParams(0, 1));
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(3L));
assertThat(response.categories().size(), equalTo(1));
assertThat(response.categories().get(0).getCategoryId(), equalTo(1L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("AAL"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setPageParams(new PageParams(1, 2));
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(3L));
assertThat(response.categories().size(), equalTo(2));
assertThat(response.categories().get(0).getCategoryId(), equalTo(2L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("JZA"));
assertThat(response.categories().get(1).getCategoryId(), equalTo(3L));
assertThat(response.categories().get(1).getGrokPattern(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(1).getRegex(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(1).getTerms(), equalTo("JBU"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setCategoryId(0L); // request a non-existent category
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(0L));
assertThat(response.categories().size(), equalTo(0));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setCategoryId(1L);
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(1L));
assertThat(response.categories().size(), equalTo(1));
assertThat(response.categories().get(0).getCategoryId(), equalTo(1L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?AAL.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("AAL"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setCategoryId(2L);
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(1L));
assertThat(response.categories().get(0).getCategoryId(), equalTo(2L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?JZA.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("JZA"));
}
{
GetCategoriesRequest request = new GetCategoriesRequest(JOB_ID);
request.setCategoryId(3L);
GetCategoriesResponse response = execute(request, machineLearningClient::getCategories,
machineLearningClient::getCategoriesAsync);
assertThat(response.count(), equalTo(1L));
assertThat(response.categories().get(0).getCategoryId(), equalTo(3L));
assertThat(response.categories().get(0).getGrokPattern(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(0).getRegex(), equalTo(".*?JBU.*"));
assertThat(response.categories().get(0).getTerms(), equalTo("JBU"));
}
}
public void testGetBuckets() throws IOException {
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();

View File

@@ -20,33 +20,40 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ml.PostDataResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
import org.elasticsearch.client.ml.job.config.JobState;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.CloseJobResponse;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.DeleteJobResponse;
import org.elasticsearch.client.ml.FlushJobRequest;
import org.elasticsearch.client.ml.FlushJobResponse;
import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.ForecastJobResponse;
import org.elasticsearch.client.ml.GetJobRequest;
import org.elasticsearch.client.ml.GetJobResponse;
import org.elasticsearch.client.ml.GetJobStatsRequest;
import org.elasticsearch.client.ml.GetJobStatsResponse;
import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.OpenJobResponse;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PostDataResponse;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.PutJobResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.DataDescription;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.config.JobState;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.common.unit.TimeValue;
import org.junit.After;
import java.io.IOException;
@@ -288,6 +295,92 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
assertEquals("Updated description", getResponse.jobs().get(0).getDescription());
}
public void testPutDatafeed() throws Exception {
String jobId = randomValidJobId();
Job job = buildJob(jobId);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
execute(new PutJobRequest(job), machineLearningClient::putJob, machineLearningClient::putJobAsync);
String datafeedId = "datafeed-" + jobId;
DatafeedConfig datafeedConfig = DatafeedConfig.builder(datafeedId, jobId).setIndices("some_data_index").build();
PutDatafeedResponse response = execute(new PutDatafeedRequest(datafeedConfig), machineLearningClient::putDatafeed,
machineLearningClient::putDatafeedAsync);
DatafeedConfig createdDatafeed = response.getResponse();
assertThat(createdDatafeed.getId(), equalTo(datafeedId));
assertThat(createdDatafeed.getIndices(), equalTo(datafeedConfig.getIndices()));
}
public void testDeleteForecast() throws Exception {
String jobId = "test-delete-forecast";
Job job = buildJob(jobId);
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT);
Job noForecastsJob = buildJob("test-delete-forecast-none");
machineLearningClient.putJob(new PutJobRequest(noForecastsJob), RequestOptions.DEFAULT);
PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder();
for(int i = 0; i < 30; i++) {
Map<String, Object> hashMap = new HashMap<>();
hashMap.put("total", randomInt(1000));
hashMap.put("timestamp", (i+1)*1000);
builder.addDoc(hashMap);
}
PostDataRequest postDataRequest = new PostDataRequest(jobId, builder);
machineLearningClient.postData(postDataRequest, RequestOptions.DEFAULT);
machineLearningClient.flushJob(new FlushJobRequest(jobId), RequestOptions.DEFAULT);
ForecastJobResponse forecastJobResponse1 = machineLearningClient.forecastJob(new ForecastJobRequest(jobId), RequestOptions.DEFAULT);
ForecastJobResponse forecastJobResponse2 = machineLearningClient.forecastJob(new ForecastJobRequest(jobId), RequestOptions.DEFAULT);
waitForForecastToComplete(jobId, forecastJobResponse1.getForecastId());
waitForForecastToComplete(jobId, forecastJobResponse2.getForecastId());
{
DeleteForecastRequest request = new DeleteForecastRequest(jobId);
request.setForecastIds(forecastJobResponse1.getForecastId(), forecastJobResponse2.getForecastId());
AcknowledgedResponse response = execute(request, machineLearningClient::deleteForecast,
machineLearningClient::deleteForecastAsync);
assertTrue(response.isAcknowledged());
assertFalse(forecastExists(jobId, forecastJobResponse1.getForecastId()));
assertFalse(forecastExists(jobId, forecastJobResponse2.getForecastId()));
}
{
DeleteForecastRequest request = DeleteForecastRequest.deleteAllForecasts(noForecastsJob.getId());
request.setAllowNoForecasts(true);
AcknowledgedResponse response = execute(request, machineLearningClient::deleteForecast,
machineLearningClient::deleteForecastAsync);
assertTrue(response.isAcknowledged());
}
{
DeleteForecastRequest request = DeleteForecastRequest.deleteAllForecasts(noForecastsJob.getId());
request.setAllowNoForecasts(false);
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
() -> execute(request, machineLearningClient::deleteForecast, machineLearningClient::deleteForecastAsync));
assertThat(exception.status().getStatus(), equalTo(404));
}
}
private void waitForForecastToComplete(String jobId, String forecastId) throws Exception {
GetRequest request = new GetRequest(".ml-anomalies-" + jobId);
request.id(jobId + "_model_forecast_request_stats_" + forecastId);
assertBusy(() -> {
GetResponse getResponse = highLevelClient().get(request, RequestOptions.DEFAULT);
assertTrue(getResponse.isExists());
assertTrue(getResponse.getSourceAsString().contains("finished"));
}, 30, TimeUnit.SECONDS);
}
private boolean forecastExists(String jobId, String forecastId) throws Exception {
GetRequest getRequest = new GetRequest(".ml-anomalies-" + jobId);
getRequest.id(jobId + "_model_forecast_request_stats_" + forecastId);
GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT);
return getResponse.isExists();
}
public static String randomValidJobId() {
CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray());
return generator.ofCodePointsLength(random(), 10, 10);

View File

@@ -21,8 +21,11 @@ package org.elasticsearch.client.documentation;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.MachineLearningGetResultsIT;
import org.elasticsearch.client.MachineLearningIT;
@@ -31,6 +34,7 @@ import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.CloseJobRequest;
import org.elasticsearch.client.ml.CloseJobResponse;
import org.elasticsearch.client.ml.DeleteForecastRequest;
import org.elasticsearch.client.ml.DeleteJobRequest;
import org.elasticsearch.client.ml.DeleteJobResponse;
import org.elasticsearch.client.ml.FlushJobRequest;
@@ -39,6 +43,8 @@ import org.elasticsearch.client.ml.ForecastJobRequest;
import org.elasticsearch.client.ml.ForecastJobResponse;
import org.elasticsearch.client.ml.GetBucketsRequest;
import org.elasticsearch.client.ml.GetBucketsResponse;
import org.elasticsearch.client.ml.GetCategoriesRequest;
import org.elasticsearch.client.ml.GetCategoriesResponse;
import org.elasticsearch.client.ml.GetInfluencersRequest;
import org.elasticsearch.client.ml.GetInfluencersResponse;
import org.elasticsearch.client.ml.GetJobRequest;
@@ -53,28 +59,36 @@ import org.elasticsearch.client.ml.OpenJobRequest;
import org.elasticsearch.client.ml.OpenJobResponse;
import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PostDataResponse;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.PutJobResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.datafeed.ChunkingConfig;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.job.config.AnalysisConfig;
import org.elasticsearch.client.ml.job.config.AnalysisLimits;
import org.elasticsearch.client.ml.job.config.DataDescription;
import org.elasticsearch.client.ml.job.config.DetectionRule;
import org.elasticsearch.client.ml.job.config.Detector;
import org.elasticsearch.client.ml.job.config.Job;
import org.elasticsearch.client.ml.job.process.DataCounts;
import org.elasticsearch.client.ml.job.config.JobUpdate;
import org.elasticsearch.client.ml.job.config.ModelPlotConfig;
import org.elasticsearch.client.ml.job.config.Operator;
import org.elasticsearch.client.ml.job.config.RuleCondition;
import org.elasticsearch.client.ml.job.process.DataCounts;
import org.elasticsearch.client.ml.job.results.AnomalyRecord;
import org.elasticsearch.client.ml.job.results.Bucket;
import org.elasticsearch.client.ml.job.results.CategoryDefinition;
import org.elasticsearch.client.ml.job.results.Influencer;
import org.elasticsearch.client.ml.job.results.OverallBucket;
import org.elasticsearch.client.ml.job.stats.JobStats;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.junit.After;
import java.io.IOException;
@@ -90,6 +104,7 @@ import java.util.stream.Collectors;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.core.Is.is;
@@ -182,8 +197,6 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
public void testGetJob() throws Exception {
RestHighLevelClient client = highLevelClient();
String jobId = "get-machine-learning-job1";
Job job = MachineLearningIT.buildJob("get-machine-learning-job1");
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
@@ -474,6 +487,106 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testPutDatafeed() throws Exception {
RestHighLevelClient client = highLevelClient();
{
// We need to create a job for the datafeed request to be valid
String jobId = "put-datafeed-job-1";
Job job = MachineLearningIT.buildJob(jobId);
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
String id = "datafeed-1";
//tag::x-pack-ml-create-datafeed-config
DatafeedConfig.Builder datafeedBuilder = new DatafeedConfig.Builder(id, jobId) // <1>
.setIndices("index_1", "index_2"); // <2>
//end::x-pack-ml-create-datafeed-config
AggregatorFactories.Builder aggs = AggregatorFactories.builder();
//tag::x-pack-ml-create-datafeed-config-set-aggregations
datafeedBuilder.setAggregations(aggs); // <1>
//end::x-pack-ml-create-datafeed-config-set-aggregations
// Clearing aggregation to avoid complex validation rules
datafeedBuilder.setAggregations((String) null);
//tag::x-pack-ml-create-datafeed-config-set-chunking-config
datafeedBuilder.setChunkingConfig(ChunkingConfig.newAuto()); // <1>
//end::x-pack-ml-create-datafeed-config-set-chunking-config
//tag::x-pack-ml-create-datafeed-config-set-frequency
datafeedBuilder.setFrequency(TimeValue.timeValueSeconds(30)); // <1>
//end::x-pack-ml-create-datafeed-config-set-frequency
//tag::x-pack-ml-create-datafeed-config-set-query
datafeedBuilder.setQuery(QueryBuilders.matchAllQuery()); // <1>
//end::x-pack-ml-create-datafeed-config-set-query
//tag::x-pack-ml-create-datafeed-config-set-query-delay
datafeedBuilder.setQueryDelay(TimeValue.timeValueMinutes(1)); // <1>
//end::x-pack-ml-create-datafeed-config-set-query-delay
List<SearchSourceBuilder.ScriptField> scriptFields = Collections.emptyList();
//tag::x-pack-ml-create-datafeed-config-set-script-fields
datafeedBuilder.setScriptFields(scriptFields); // <1>
//end::x-pack-ml-create-datafeed-config-set-script-fields
//tag::x-pack-ml-create-datafeed-config-set-scroll-size
datafeedBuilder.setScrollSize(1000); // <1>
//end::x-pack-ml-create-datafeed-config-set-scroll-size
//tag::x-pack-ml-put-datafeed-request
PutDatafeedRequest request = new PutDatafeedRequest(datafeedBuilder.build()); // <1>
//end::x-pack-ml-put-datafeed-request
//tag::x-pack-ml-put-datafeed-execute
PutDatafeedResponse response = client.machineLearning().putDatafeed(request, RequestOptions.DEFAULT);
//end::x-pack-ml-put-datafeed-execute
//tag::x-pack-ml-put-datafeed-response
DatafeedConfig datafeed = response.getResponse(); // <1>
//end::x-pack-ml-put-datafeed-response
assertThat(datafeed.getId(), equalTo("datafeed-1"));
}
{
// We need to create a job for the datafeed request to be valid
String jobId = "put-datafeed-job-2";
Job job = MachineLearningIT.buildJob(jobId);
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
String id = "datafeed-2";
DatafeedConfig datafeed = new DatafeedConfig.Builder(id, jobId).setIndices("index_1", "index_2").build();
PutDatafeedRequest request = new PutDatafeedRequest(datafeed);
// tag::x-pack-ml-put-datafeed-execute-listener
ActionListener<PutDatafeedResponse> listener = new ActionListener<PutDatafeedResponse>() {
@Override
public void onResponse(PutDatafeedResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-ml-put-datafeed-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-put-datafeed-execute-async
client.machineLearning().putDatafeedAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-ml-put-datafeed-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testGetBuckets() throws IOException, InterruptedException {
RestHighLevelClient client = highLevelClient();
@@ -637,6 +750,83 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testDeleteForecast() throws Exception {
RestHighLevelClient client = highLevelClient();
Job job = MachineLearningIT.buildJob("deleting-forecast-for-job");
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);
PostDataRequest.JsonBuilder builder = new PostDataRequest.JsonBuilder();
for(int i = 0; i < 30; i++) {
Map<String, Object> hashMap = new HashMap<>();
hashMap.put("total", randomInt(1000));
hashMap.put("timestamp", (i+1)*1000);
builder.addDoc(hashMap);
}
PostDataRequest postDataRequest = new PostDataRequest(job.getId(), builder);
client.machineLearning().postData(postDataRequest, RequestOptions.DEFAULT);
client.machineLearning().flushJob(new FlushJobRequest(job.getId()), RequestOptions.DEFAULT);
ForecastJobResponse forecastJobResponse = client.machineLearning().
forecastJob(new ForecastJobRequest(job.getId()), RequestOptions.DEFAULT);
String forecastId = forecastJobResponse.getForecastId();
GetRequest request = new GetRequest(".ml-anomalies-" + job.getId());
request.id(job.getId() + "_model_forecast_request_stats_" + forecastId);
assertBusy(() -> {
GetResponse getResponse = highLevelClient().get(request, RequestOptions.DEFAULT);
assertTrue(getResponse.isExists());
assertTrue(getResponse.getSourceAsString().contains("finished"));
}, 30, TimeUnit.SECONDS);
{
//tag::x-pack-ml-delete-forecast-request
DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest("deleting-forecast-for-job"); //<1>
//end::x-pack-ml-delete-forecast-request
//tag::x-pack-ml-delete-forecast-request-options
deleteForecastRequest.setForecastIds(forecastId); //<1>
deleteForecastRequest.timeout("30s"); //<2>
deleteForecastRequest.setAllowNoForecasts(true); //<3>
//end::x-pack-ml-delete-forecast-request-options
//tag::x-pack-ml-delete-forecast-execute
AcknowledgedResponse deleteForecastResponse = client.machineLearning().deleteForecast(deleteForecastRequest,
RequestOptions.DEFAULT);
//end::x-pack-ml-delete-forecast-execute
//tag::x-pack-ml-delete-forecast-response
boolean isAcknowledged = deleteForecastResponse.isAcknowledged(); //<1>
//end::x-pack-ml-delete-forecast-response
}
{
//tag::x-pack-ml-delete-forecast-listener
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse deleteForecastResponse) {
//<1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
//end::x-pack-ml-delete-forecast-listener
DeleteForecastRequest deleteForecastRequest = DeleteForecastRequest.deleteAllForecasts(job.getId());
deleteForecastRequest.setAllowNoForecasts(true);
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-delete-forecast-execute-async
client.machineLearning().deleteForecastAsync(deleteForecastRequest, RequestOptions.DEFAULT, listener); //<1>
// end::x-pack-ml-delete-forecast-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testGetJobStats() throws Exception {
RestHighLevelClient client = highLevelClient();
@@ -1111,4 +1301,74 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testGetCategories() throws IOException, InterruptedException {
RestHighLevelClient client = highLevelClient();
String jobId = "test-get-categories";
Job job = MachineLearningIT.buildJob(jobId);
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
// Let us index a category
IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc");
indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
indexRequest.source("{\"job_id\": \"test-get-categories\", \"category_id\": 1, \"terms\": \"AAL\"," +
" \"regex\": \".*?AAL.*\", \"max_matching_length\": 3, \"examples\": [\"AAL\"]}", XContentType.JSON);
client.index(indexRequest, RequestOptions.DEFAULT);
{
// tag::x-pack-ml-get-categories-request
GetCategoriesRequest request = new GetCategoriesRequest(jobId); // <1>
// end::x-pack-ml-get-categories-request
// tag::x-pack-ml-get-categories-category-id
request.setCategoryId(1L); // <1>
// end::x-pack-ml-get-categories-category-id
// tag::x-pack-ml-get-categories-page
request.setPageParams(new PageParams(100, 200)); // <1>
// end::x-pack-ml-get-categories-page
// Set page params back to null so the response contains the category we indexed
request.setPageParams(null);
// tag::x-pack-ml-get-categories-execute
GetCategoriesResponse response = client.machineLearning().getCategories(request, RequestOptions.DEFAULT);
// end::x-pack-ml-get-categories-execute
// tag::x-pack-ml-get-categories-response
long count = response.count(); // <1>
List<CategoryDefinition> categories = response.categories(); // <2>
// end::x-pack-ml-get-categories-response
assertEquals(1, categories.size());
}
{
GetCategoriesRequest request = new GetCategoriesRequest(jobId);
// tag::x-pack-ml-get-categories-listener
ActionListener<GetCategoriesResponse> listener =
new ActionListener<GetCategoriesResponse>() {
@Override
public void onResponse(GetCategoriesResponse getCategoriesResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-ml-get-categories-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::x-pack-ml-get-categories-execute-async
client.machineLearning().getCategoriesAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-ml-get-categories-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
}

View File

@@ -0,0 +1,62 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.config.JobTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class DeleteForecastRequestTests extends AbstractXContentTestCase<DeleteForecastRequest> {
@Override
protected DeleteForecastRequest createTestInstance() {
DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(JobTests.randomValidJobId());
if (randomBoolean()) {
int length = randomInt(10);
List<String> ids = new ArrayList<>(length);
for(int i = 0; i < length; i++) {
ids.add(randomAlphaOfLength(10));
}
deleteForecastRequest.setForecastIds(ids);
}
if (randomBoolean()) {
deleteForecastRequest.setAllowNoForecasts(randomBoolean());
}
if (randomBoolean()) {
deleteForecastRequest.timeout(randomTimeValue());
}
return deleteForecastRequest;
}
@Override
protected DeleteForecastRequest doParseInstance(XContentParser parser) throws IOException {
return DeleteForecastRequest.PARSER.apply(parser, null);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
}

View File

@@ -0,0 +1,51 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.util.PageParams;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
public class GetCategoriesRequestTests extends AbstractXContentTestCase<GetCategoriesRequest> {
@Override
protected GetCategoriesRequest createTestInstance() {
GetCategoriesRequest request = new GetCategoriesRequest(randomAlphaOfLengthBetween(1, 20));
if (randomBoolean()) {
request.setCategoryId(randomNonNegativeLong());
} else {
int from = randomInt(10000);
int size = randomInt(10000);
request.setPageParams(new PageParams(from, size));
}
return request;
}
@Override
protected GetCategoriesRequest doParseInstance(XContentParser parser) throws IOException {
return GetCategoriesRequest.PARSER.apply(parser, null);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
}

View File

@@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.job.results.CategoryDefinition;
import org.elasticsearch.client.ml.job.results.CategoryDefinitionTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class GetCategoriesResponseTests extends AbstractXContentTestCase<GetCategoriesResponse> {
@Override
protected GetCategoriesResponse createTestInstance() {
String jobId = randomAlphaOfLength(20);
int listSize = randomInt(10);
List<CategoryDefinition> categories = new ArrayList<>(listSize);
for (int j = 0; j < listSize; j++) {
CategoryDefinition category = CategoryDefinitionTests.createTestInstance(jobId);
categories.add(category);
}
return new GetCategoriesResponse(categories, listSize);
}
@Override
protected GetCategoriesResponse doParseInstance(XContentParser parser) throws IOException {
return GetCategoriesResponse.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
}

View File

@@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
public class PutDatafeedRequestTests extends AbstractXContentTestCase<PutDatafeedRequest> {
@Override
protected PutDatafeedRequest createTestInstance() {
return new PutDatafeedRequest(DatafeedConfigTests.createRandom());
}
@Override
protected PutDatafeedRequest doParseInstance(XContentParser parser) {
return new PutDatafeedRequest(DatafeedConfig.PARSER.apply(parser, null).build());
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
}

View File

@@ -0,0 +1,49 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ml;
import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.function.Predicate;
public class PutDatafeedResponseTests extends AbstractXContentTestCase<PutDatafeedResponse> {
@Override
protected PutDatafeedResponse createTestInstance() {
return new PutDatafeedResponse(DatafeedConfigTests.createRandom());
}
@Override
protected PutDatafeedResponse doParseInstance(XContentParser parser) throws IOException {
return PutDatafeedResponse.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return field -> !field.isEmpty();
}
}

View File

@@ -19,7 +19,6 @@
package org.elasticsearch.client.ml.datafeed;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -27,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
@@ -36,19 +34,26 @@ import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig> {
@Override
protected DatafeedConfig createTestInstance() {
return createRandom();
}
public static DatafeedConfig createRandom() {
long bucketSpanMillis = 3600000;
DatafeedConfig.Builder builder = constructBuilder();
builder.setIndices(randomStringList(1, 10));
builder.setTypes(randomStringList(0, 10));
if (randomBoolean()) {
try {
builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
} catch (IOException e) {
throw new RuntimeException("Failed to serialize query", e);
}
}
boolean addScriptFields = randomBoolean();
if (addScriptFields) {
@@ -72,7 +77,11 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time");
aggs.addAggregator(AggregationBuilders.dateHistogram("buckets")
.interval(aggHistogramInterval).subAggregation(maxTime).field("time"));
try {
builder.setAggregations(aggs);
} catch (IOException e) {
throw new RuntimeException("failed to serialize aggs", e);
}
} }
if (randomBoolean()) { if (randomBoolean()) {
builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE)); builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
@ -93,12 +102,6 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
return builder.build(); return builder.build();
} }
@Override
protected NamedXContentRegistry xContentRegistry() {
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
return new NamedXContentRegistry(searchModule.getNamedXContents());
}
public static List<String> randomStringList(int min, int max) { public static List<String> randomStringList(int min, int max) {
int size = scaledRandomIntBetween(min, max); int size = scaledRandomIntBetween(min, max);
List<String> list = new ArrayList<>(); List<String> list = new ArrayList<>();
@ -150,21 +153,6 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(randomValidDatafeedId(), null)); expectThrows(NullPointerException.class, () -> new DatafeedConfig.Builder(randomValidDatafeedId(), null));
} }
public void testCheckValid_GivenNullIndices() {
DatafeedConfig.Builder conf = constructBuilder();
expectThrows(NullPointerException.class, () -> conf.setIndices(null));
}
public void testCheckValid_GivenNullType() {
DatafeedConfig.Builder conf = constructBuilder();
expectThrows(NullPointerException.class, () -> conf.setTypes(null));
}
public void testCheckValid_GivenNullQuery() {
DatafeedConfig.Builder conf = constructBuilder();
expectThrows(NullPointerException.class, () -> conf.setQuery(null));
}
public static String randomValidDatafeedId() { public static String randomValidDatafeedId() {
CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray()); CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz".toCharArray());
return generator.ofCodePointsLength(random(), 10, 10); return generator.ofCodePointsLength(random(), 10, 10);

View File

@@ -18,19 +18,16 @@
  */
 package org.elasticsearch.client.ml.datafeed;

-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryBuilders;
-import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.AbstractXContentTestCase;

+import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;

 public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate> {
@@ -54,7 +51,11 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate>
             builder.setTypes(DatafeedConfigTests.randomStringList(1, 10));
         }
         if (randomBoolean()) {
+            try {
                 builder.setQuery(QueryBuilders.termQuery(randomAlphaOfLength(10), randomAlphaOfLength(10)));
+            } catch (IOException e) {
+                throw new RuntimeException("Failed to serialize query", e);
+            }
         }
         if (randomBoolean()) {
             int scriptsSize = randomInt(3);
@@ -71,7 +72,11 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate>
             // Testing with a single agg is ok as we don't have special list xcontent logic
             AggregatorFactories.Builder aggs = new AggregatorFactories.Builder();
             aggs.addAggregator(AggregationBuilders.avg(randomAlphaOfLength(10)).field(randomAlphaOfLength(10)));
+            try {
                 builder.setAggregations(aggs);
+            } catch (IOException e) {
+                throw new RuntimeException("Failed to serialize aggs", e);
+            }
         }
         if (randomBoolean()) {
             builder.setScrollSize(randomIntBetween(0, Integer.MAX_VALUE));
@@ -91,11 +96,4 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate>
     protected boolean supportsUnknownFields() {
         return false;
     }
-
-    @Override
-    protected NamedXContentRegistry xContentRegistry() {
-        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
-        return new NamedXContentRegistry(searchModule.getNamedXContents());
-    }
 }

@@ -25,7 +25,7 @@ import java.util.Arrays;

 public class CategoryDefinitionTests extends AbstractXContentTestCase<CategoryDefinition> {

-    public CategoryDefinition createTestInstance(String jobId) {
+    public static CategoryDefinition createTestInstance(String jobId) {
         CategoryDefinition categoryDefinition = new CategoryDefinition(jobId);
         categoryDefinition.setCategoryId(randomLong());
         categoryDefinition.setTerms(randomAlphaOfLength(10));

@@ -17,7 +17,7 @@
  * under the License.
  */
-package org.elasticsearch.unconfigurednodename;
+package org.elasticsearch.test.rest;

 import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;

@@ -57,6 +57,8 @@ integTestCluster {
   // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults
   systemProperty 'es.scripting.use_java_time', 'false'
   systemProperty 'es.scripting.update.ctx_in_params', 'false'
+  //TODO: remove this once the cname is prepended to the address by default in 7.0
+  systemProperty 'es.http.cname_in_publish_address', 'true'
 }

 // remove when https://github.com/elastic/elasticsearch/issues/31305 is fixed

@@ -0,0 +1,78 @@
[[java-rest-high-x-pack-ml-delete-forecast]]
=== Delete Forecast API

The Delete Forecast API provides the ability to delete a {ml} job's
forecasts in the cluster.
It accepts a `DeleteForecastRequest` object and responds
with an `AcknowledgedResponse` object.

[[java-rest-high-x-pack-ml-delete-forecast-request]]
==== Delete Forecast Request

A `DeleteForecastRequest` object gets created with an existing non-null `jobId`.
All other fields are optional for the request.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-request]
--------------------------------------------------
<1> Constructing a new request referencing an existing `jobId`

==== Optional Arguments

The following arguments are optional.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-request-options]
--------------------------------------------------
<1> Sets the specific `forecastIds` to delete; can be set to `_all` to indicate ALL forecasts for the given
`jobId`
<2> Sets the timeout for the request to respond; defaults to 30 seconds
<3> Sets the `allow_no_forecasts` option. When `true`, no error is returned if an `_all`
request finds no forecasts. Defaults to `true`
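
Put together, a request using all three options might be sketched as follows.
This is an illustrative sketch only: the job and forecast ids are hypothetical
values, and the setter names are assumed from the callouts above.

[source,java]
--------------------------------------------------
DeleteForecastRequest request = new DeleteForecastRequest("my-job"); // hypothetical job id
request.setForecastIds("forecast-a", "forecast-b"); // or "_all" for every forecast of the job
request.setTimeout(TimeValue.timeValueSeconds(30)); // wait at most 30s for the deletion
request.setAllowNoForecasts(true);                  // don't error when "_all" matches nothing
--------------------------------------------------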

[[java-rest-high-x-pack-ml-delete-forecast-execution]]
==== Execution

The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-execute]
--------------------------------------------------

[[java-rest-high-x-pack-ml-delete-forecast-execution-async]]
==== Asynchronous Execution

The request can also be executed asynchronously:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-execute-async]
--------------------------------------------------
<1> The `DeleteForecastRequest` to execute and the `ActionListener` to use when
the execution completes

The method does not block and returns immediately. The passed `ActionListener` is used
to notify the caller of completion. A typical `ActionListener` for `AcknowledgedResponse` may
look like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs

[[java-rest-high-x-pack-ml-delete-forecast-response]]
==== Delete Forecast Response

An `AcknowledgedResponse` contains an acknowledgement of the forecast(s) deletion:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-delete-forecast-response]
--------------------------------------------------
<1> `isAcknowledged()` indicates whether the forecast(s) were successfully deleted

@@ -0,0 +1,83 @@
[[java-rest-high-x-pack-ml-get-categories]]
=== Get Categories API

The Get Categories API retrieves one or more category results.
It accepts a `GetCategoriesRequest` object and responds
with a `GetCategoriesResponse` object.

[[java-rest-high-x-pack-ml-get-categories-request]]
==== Get Categories Request

A `GetCategoriesRequest` object gets created with an existing non-null `jobId`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-request]
--------------------------------------------------
<1> Constructing a new request referencing an existing `jobId`

==== Optional Arguments

The following arguments are optional:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-category-id]
--------------------------------------------------
<1> The id of the category to get. If not set, all categories are returned.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-page]
--------------------------------------------------
<1> The page parameters `from` and `size`. `from` specifies the number of categories to skip.
`size` specifies the maximum number of categories to get. Defaults to `0` and `100` respectively.
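
For example, a paging request might be sketched as follows. This is an
illustrative sketch only: the job id and paging values are hypothetical, and
the setter names are assumed from the callouts above.

[source,java]
--------------------------------------------------
GetCategoriesRequest request = new GetCategoriesRequest("my-job"); // hypothetical job id
request.setCategoryId(1L);                       // fetch one specific category...
// ...or, alternatively, page through all categories:
request.setPageParams(new PageParams(100, 200)); // skip the first 100, return up to 200
--------------------------------------------------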

[[java-rest-high-x-pack-ml-get-categories-execution]]
==== Execution

The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-execute]
--------------------------------------------------

[[java-rest-high-x-pack-ml-get-categories-execution-async]]
==== Asynchronous Execution

The request can also be executed asynchronously:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-execute-async]
--------------------------------------------------
<1> The `GetCategoriesRequest` to execute and the `ActionListener` to use when
the execution completes

The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back with the `onResponse` method
if the execution is successful or the `onFailure` method if the execution
failed.

A typical listener for `GetCategoriesResponse` looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-listener]
--------------------------------------------------
<1> `onResponse` is called back when the action is completed successfully
<2> `onFailure` is called back when some unexpected error occurs

[[java-rest-high-x-pack-ml-get-categories-response]]
==== Get Categories Response

The returned `GetCategoriesResponse` contains the requested categories:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-categories-response]
--------------------------------------------------
<1> The count of categories that were matched
<2> The categories retrieved

@@ -0,0 +1,124 @@
[[java-rest-high-x-pack-ml-put-datafeed]]
=== Put Datafeed API

The Put Datafeed API can be used to create a new {ml} datafeed
in the cluster. The API accepts a `PutDatafeedRequest` object
as a request and returns a `PutDatafeedResponse`.

[[java-rest-high-x-pack-ml-put-datafeed-request]]
==== Put Datafeed Request

A `PutDatafeedRequest` requires the following argument:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-request]
--------------------------------------------------
<1> The configuration of the {ml} datafeed to create

[[java-rest-high-x-pack-ml-put-datafeed-config]]
==== Datafeed Configuration

The `DatafeedConfig` object contains all the details about the {ml} datafeed
configuration.

A `DatafeedConfig` requires the following arguments:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config]
--------------------------------------------------
<1> The datafeed ID and the job ID
<2> The indices that contain the data to retrieve and feed into the job

==== Optional Arguments

The following arguments are optional:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-chunking-config]
--------------------------------------------------
<1> Specifies how data searches are split into time chunks.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-frequency]
--------------------------------------------------
<1> The interval at which scheduled queries are made while the datafeed runs in real time.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-query]
--------------------------------------------------
<1> A query to filter the search results by. Defaults to the `match_all` query.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-query-delay]
--------------------------------------------------
<1> The time interval behind real time that data is queried.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-script-fields]
--------------------------------------------------
<1> Allows the use of script fields.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-create-datafeed-config-set-scroll-size]
--------------------------------------------------
<1> The `size` parameter used in the searches.
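
Assembled end to end, a configuration using several of these options might be
sketched as follows. This is an illustrative sketch only: the ids, index name,
and values are hypothetical. Note that in this client `setQuery` and
`setAggregations` can throw an `IOException`, because the value is serialized
eagerly, so the surrounding method must handle or declare it.

[source,java]
--------------------------------------------------
DatafeedConfig.Builder builder = new DatafeedConfig.Builder("my-datafeed", "my-job");
builder.setIndices(Arrays.asList("my-data-index"));   // where to pull documents from
builder.setQuery(QueryBuilders.matchAllQuery());      // optional, this is the default
builder.setQueryDelay(TimeValue.timeValueMinutes(1)); // stay one minute behind real time
builder.setFrequency(TimeValue.timeValueSeconds(30)); // search every 30 seconds
builder.setScrollSize(1000);                          // the search "size" parameter
PutDatafeedRequest request = new PutDatafeedRequest(builder.build());
--------------------------------------------------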

[[java-rest-high-x-pack-ml-put-datafeed-execution]]
==== Execution

The Put Datafeed API can be executed through a `MachineLearningClient`
instance. Such an instance can be retrieved from a `RestHighLevelClient`
using the `machineLearning()` method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute]
--------------------------------------------------

[[java-rest-high-x-pack-ml-put-datafeed-response]]
==== Response

The returned `PutDatafeedResponse` contains the full representation of
the new {ml} datafeed if it has been successfully created. This will
contain the creation time and other fields initialized using
default values:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-response]
--------------------------------------------------
<1> The created datafeed

[[java-rest-high-x-pack-ml-put-datafeed-async]]
==== Asynchronous Execution

This request can be executed asynchronously:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute-async]
--------------------------------------------------
<1> The `PutDatafeedRequest` to execute and the `ActionListener` to use when
the execution completes

The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.

A typical listener for `PutDatafeedResponse` looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-datafeed-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument

@@ -142,7 +142,7 @@ This request can be executed asynchronously:
 --------------------------------------------------
 include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-put-job-execute-async]
 --------------------------------------------------
-<1> The `PutMlJobRequest` to execute and the `ActionListener` to use when
+<1> The `PutJobRequest` to execute and the `ActionListener` to use when
 the execution completes

 The asynchronous method does not block and returns immediately. Once it is

@@ -220,12 +220,15 @@ The Java High Level REST Client supports the following Machine Learning APIs:
 * <<java-rest-high-x-pack-ml-flush-job>>
 * <<java-rest-high-x-pack-ml-update-job>>
 * <<java-rest-high-x-pack-ml-get-job-stats>>
+* <<java-rest-high-x-pack-ml-put-datafeed>>
 * <<java-rest-high-x-pack-ml-forecast-job>>
+* <<java-rest-high-x-pack-ml-delete-forecast>>
 * <<java-rest-high-x-pack-ml-get-buckets>>
 * <<java-rest-high-x-pack-ml-get-overall-buckets>>
 * <<java-rest-high-x-pack-ml-get-records>>
 * <<java-rest-high-x-pack-ml-post-data>>
 * <<java-rest-high-x-pack-ml-get-influencers>>
+* <<java-rest-high-x-pack-ml-get-categories>>

 include::ml/put-job.asciidoc[]
 include::ml/get-job.asciidoc[]
@@ -234,13 +237,16 @@ include::ml/open-job.asciidoc[]
 include::ml/close-job.asciidoc[]
 include::ml/update-job.asciidoc[]
 include::ml/flush-job.asciidoc[]
+include::ml/put-datafeed.asciidoc[]
 include::ml/get-job-stats.asciidoc[]
 include::ml/forecast-job.asciidoc[]
+include::ml/delete-forecast.asciidoc[]
 include::ml/get-buckets.asciidoc[]
 include::ml/get-overall-buckets.asciidoc[]
 include::ml/get-records.asciidoc[]
 include::ml/post-data.asciidoc[]
 include::ml/get-influencers.asciidoc[]
+include::ml/get-categories.asciidoc[]

 == Migration APIs

@@ -348,7 +348,7 @@ GET /_search
 \... will sort the composite bucket in descending order when comparing values from the `date_histogram` source
 and in ascending order when comparing values from the `terms` source.

-====== Missing bucket
+==== Missing bucket

 By default documents without a value for a given source are ignored.
 It is possible to include them in the response by setting `missing_bucket` to

@@ -37,6 +37,8 @@ include::tokenfilters/multiplexer-tokenfilter.asciidoc[]

 include::tokenfilters/condition-tokenfilter.asciidoc[]

+include::tokenfilters/predicate-tokenfilter.asciidoc[]
+
 include::tokenfilters/stemmer-tokenfilter.asciidoc[]

 include::tokenfilters/stemmer-override-tokenfilter.asciidoc[]

@@ -0,0 +1,79 @@
[[analysis-predicatefilter-tokenfilter]]
=== Predicate Token Filter Script

The `predicate_token_filter` token filter takes a predicate script and removes
tokens that do not match the predicate.

[float]
=== Options
[horizontal]
script:: a predicate script that determines whether or not the current token will
be emitted. Note that only inline scripts are supported.

[float]
=== Settings example

You can set it up like:

[source,js]
--------------------------------------------------
PUT /condition_example
{
  "settings" : {
    "analysis" : {
      "analyzer" : {
        "my_analyzer" : {
          "tokenizer" : "standard",
          "filter" : [ "my_script_filter" ]
        }
      },
      "filter" : {
        "my_script_filter" : {
          "type" : "predicate_token_filter",
          "script" : {
            "source" : "token.getTerm().length() > 5" <1>
          }
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE

<1> This will emit tokens that are more than 5 characters long

And test it like:

[source,js]
--------------------------------------------------
POST /condition_example/_analyze
{
  "analyzer" : "my_analyzer",
  "text" : "What Flapdoodle"
}
--------------------------------------------------
// CONSOLE
// TEST[continued]

And it'd respond:

[source,js]
--------------------------------------------------
{
  "tokens": [
    {
      "token": "Flapdoodle", <1>
      "start_offset": 5,
      "end_offset": 15,
      "type": "<ALPHANUM>",
      "position": 1 <2>
    }
  ]
}
--------------------------------------------------
// TESTRESPONSE

<1> The token 'What' has been removed from the tokenstream because it does not
match the predicate.
<2> The position and offset values are unaffected by the removal of earlier tokens.

@@ -40,3 +40,16 @@ will be removed in the future, thus requiring HTTP to always be enabled.
 This setting has been removed, as disabling http pipelining support on the server
 provided little value. The setting `http.pipelining.max_events` can still be used to
 limit the number of pipelined requests in-flight.
+
+==== Cross-cluster search settings renamed
+
+The cross-cluster search remote cluster connection infrastructure is also used
+in cross-cluster replication. This means that the setting names
+`search.remote.*` used for configuring cross-cluster search belie the fact that
+they also apply to other situations where a connection to a remote cluster is
+used. Therefore, these settings have been renamed from `search.remote.*` to
+`cluster.remote.*`. For backwards compatibility purposes, we will fall back to
+`search.remote.*` if `cluster.remote.*` is not set. For any such settings stored
+in the cluster state, or set on dynamic settings updates, we will automatically
+upgrade the setting from `search.remote.*` to `cluster.remote.*`. The fallback
+settings will be removed in 8.0.0.
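
As an illustration, a remote cluster previously configured with the old prefix
would now be configured as below in `elasticsearch.yml` (a sketch only: the
cluster alias and seed address are hypothetical):

[source,yaml]
--------------------------------------------------
# New-style setting name:
cluster.remote.cluster_one.seeds: ["10.0.0.1:9300"]
# The old name, search.remote.cluster_one.seeds, keeps
# working as a fallback until it is removed in 8.0.0.
--------------------------------------------------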


@@ -207,6 +207,51 @@ repositories.url.allowed_urls: ["http://www.example.org/root/*", "https://*.mydomain.com/*?*#*"]
 URL repositories with `file:` URLs can only point to locations registered in the `path.repo` setting similar to
 shared file system repository.

+[float]
+[role="xpack"]
+[testenv="basic"]
+===== Source Only Repository
+
+A source repository enables you to create minimal, source-only snapshots that take up to 50% less space on disk.
+Source-only snapshots contain stored fields and index metadata. They do not include index or doc values structures
+and are not searchable when restored. After restoring a source-only snapshot, you must <<docs-reindex,reindex>>
+the data into a new index.
+
+Source repositories delegate to another snapshot repository for storage.
+
+[IMPORTANT]
+==================================================
+Source-only snapshots are only supported if the `_source` field is enabled and no source-filtering is applied.
+When you restore a source-only snapshot:
+
+ * The restored index is read-only and can only serve `match_all` search or scroll requests to enable reindexing.
+
+ * Queries other than `match_all` and `_get` requests are not supported.
+
+ * The mapping of the restored index is empty, but the original mapping is available from the type's top-level
+`meta` element.
+==================================================
+
+When you create a source repository, you must specify the type and name of the delegate repository
+where the snapshots will be stored:
+
+[source,js]
+-----------------------------------
+PUT _snapshot/my_src_only_repository
+{
+  "type": "source",
+  "settings": {
+    "delegate_type": "fs",
+    "location": "my_backup_location"
+  }
+}
+-----------------------------------
+// CONSOLE
+// TEST[continued]
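
Once such a snapshot exists, restoring it and then reindexing the restored
data might look like the sketch below (an illustrative, untested sketch: the
snapshot and index names are hypothetical):

[source,js]
-----------------------------------
POST _snapshot/my_src_only_repository/snapshot_1/_restore
{
  "indices": "my_index"
}

POST _reindex
{
  "source": { "index": "my_index" },
  "dest":   { "index": "my_index_restored" }
}
-----------------------------------
// NOTCONSOLE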

 [float]
 ===== Repository plugins

@@ -172,7 +172,7 @@ GET /_search
 The example above creates a boolean query:

-`(ny OR (new AND york)) city)`
+`(ny OR (new AND york)) city`

 that matches documents with the term `ny` or the conjunction `new AND york`.
 By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.

@@ -13,6 +13,9 @@ Every context mapping has a unique name and a type. There are two types: `category`
 and `geo`. Context mappings are configured under the `contexts` parameter in
 the field mapping.

+NOTE: It is mandatory to provide a context when indexing and querying
+a context enabled completion field.
+
 The following defines types, each with two context mappings for a completion
 field:
@@ -84,10 +87,6 @@ PUT place_path_category
 NOTE: Adding context mappings increases the index size for completion field. The completion index
 is entirely heap resident, you can monitor the completion field index size using <<indices-stats>>.

-NOTE: deprecated[7.0.0, Indexing a suggestion without context on a context enabled completion field is deprecated
-and will be removed in the next major release. If you want to index a suggestion that matches all contexts you should
-add a special context for it.]
-
 [[suggester-context-category]]
 [float]
 ==== Category Context
@@ -160,9 +159,9 @@ POST place/_search?pretty
 // CONSOLE
 // TEST[continued]

-Note: deprecated[7.0.0, When no categories are provided at query-time, all indexed documents are considered.
-Querying with no categories on a category enabled completion field is deprecated and will be removed in the next major release
-as it degrades search performance considerably.]
+NOTE: If multiple categories or category contexts are set on the query
+they are merged as a disjunction. This means that suggestions match
+if they contain at least one of the provided context values.

 Suggestions with certain categories can be boosted higher than others.
 The following filters suggestions by categories and additionally boosts
@@ -218,6 +217,9 @@ multiple category context clauses. The following parameters are supported for a
 so on, by specifying a category prefix of 'type'.
 Defaults to `false`

+NOTE: If a suggestion entry matches multiple contexts the final score is computed as the
+maximum score produced by any matching contexts.
+
 [[suggester-context-geo]]
 [float]
 ==== Geo location Context
@@ -307,6 +309,10 @@ POST place/_search
 NOTE: When a location with a lower precision at query time is specified, all suggestions
 that fall within the area will be considered.

+NOTE: If multiple categories or category contexts are set on the query
+they are merged as a disjunction. This means that suggestions match
+if they contain at least one of the provided context values.
+
 Suggestions that are within an area represented by a geohash can also be boosted higher
 than others, as shown by the following:
@@ -349,6 +355,9 @@ POST place/_search?pretty
 that fall under the geohash representation of '(43.6624803, -79.3863353)'
 with a default precision of '6' by a factor of `2`

+NOTE: If a suggestion entry matches multiple contexts the final score is computed as the
+maximum score produced by any matching contexts.
+
 In addition to accepting context values, a context query can be composed of
 multiple context clauses. The following parameters are supported for a
 `category` context clause:

@@ -295,8 +295,9 @@ as _properties_ within Windows Installer documentation) that can be passed to `msiexec.exe`:

 `SKIPSETTINGPASSWORDS`::

-  When installing with a `Trial` license and X-Pack Security enabled, whether the
-  installation should skip setting up the built-in users `elastic`, `kibana` and `logstash_system`.
+  When installing with a `Trial` license and {security} enabled, whether the
+  installation should skip setting up the built-in users `elastic`, `kibana`,
+  `logstash_system`, `apm_system`, and `beats_system`.
   Defaults to `false`

 `ELASTICUSERPASSWORD`::

@@ -20,6 +20,7 @@ package org.elasticsearch.core.internal.io;
 import java.io.Closeable;
 import java.io.IOException;
 import java.nio.channels.FileChannel;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.FileVisitResult;
 import java.nio.file.FileVisitor;
 import java.nio.file.Files;
@@ -36,6 +37,14 @@ import java.util.Map;
  */
 public final class IOUtils {

+    /**
+     * UTF-8 charset string.
+     * <p>Where possible, use {@link StandardCharsets#UTF_8} instead,
+     * as using the String constant may slow things down.
+     * @see StandardCharsets#UTF_8
+     */
+    public static final String UTF_8 = StandardCharsets.UTF_8.name();
+
     private IOUtils() {
         // Static utils methods
     }

@@ -19,6 +19,13 @@
 package org.elasticsearch.analysis.common;

+import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
+import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
+import org.apache.lucene.util.AttributeSource;
 import org.elasticsearch.script.ScriptContext;

 /**
@@ -30,21 +37,40 @@ public abstract class AnalysisPredicateScript {
      * Encapsulation of the state of the current token
      */
     public static class Token {
-        public CharSequence term;
-        public int pos;
-        public int posInc;
-        public int posLen;
-        public int startOffset;
-        public int endOffset;
-        public String type;
-        public boolean isKeyword;
+
+        private final CharTermAttribute termAtt;
+        private final PositionIncrementAttribute posIncAtt;
+        private final PositionLengthAttribute posLenAtt;
+        private final OffsetAttribute offsetAtt;
+        private final TypeAttribute typeAtt;
+        private final KeywordAttribute keywordAtt;
+
+        // posInc is always 1 at the beginning of a tokenstream and the convention
+        // from the _analyze endpoint is that tokenstream positions are 0-based
+        private int pos = -1;
+
+        /**
+         * Create a token exposing values from an AttributeSource
+         */
+        public Token(AttributeSource source) {
+            this.termAtt = source.addAttribute(CharTermAttribute.class);
+            this.posIncAtt = source.addAttribute(PositionIncrementAttribute.class);
+            this.posLenAtt = source.addAttribute(PositionLengthAttribute.class);
+            this.offsetAtt = source.addAttribute(OffsetAttribute.class);
+            this.typeAtt = source.addAttribute(TypeAttribute.class);
+            this.keywordAtt = source.addAttribute(KeywordAttribute.class);
+        }
+
+        public void updatePosition() {
+            this.pos = this.pos + posIncAtt.getPositionIncrement();
+        }

         public CharSequence getTerm() {
-            return term;
+            return termAtt;
         }

         public int getPositionIncrement() {
-            return posInc;
+            return posIncAtt.getPositionIncrement();
         }

         public int getPosition() {
@@ -52,23 +78,23 @@ public abstract class AnalysisPredicateScript {
         }

         public int getPositionLength() {
-            return posLen;
+            return posLenAtt.getPositionLength();
         }

         public int getStartOffset() {
-            return startOffset;
+            return offsetAtt.startOffset();
         }

         public int getEndOffset() {
-            return endOffset;
+            return offsetAtt.endOffset();
         }

         public String getType() {
-            return type;
+            return typeAtt.type();
         }

         public boolean isKeyword() {
-            return isKeyword;
+            return keywordAtt.isKeyword();
         }
     }

@@ -264,6 +264,8 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin, ScriptPlugin {
         filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new));
         filters.put("persian_normalization", PersianNormalizationFilterFactory::new);
         filters.put("porter_stem", PorterStemTokenFilterFactory::new);
+        filters.put("predicate_token_filter",
+            requiresAnalysisSettings((i, e, n, s) -> new PredicateTokenFilterScriptFactory(i, n, s, scriptService.get())));
         filters.put("remove_duplicates", RemoveDuplicatesTokenFilterFactory::new);
         filters.put("reverse", ReverseTokenFilterFactory::new);
         filters.put("russian_stem", RussianStemTokenFilterFactory::new);

@@ -0,0 +1,73 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.analysis.common;

import org.apache.lucene.analysis.FilteringTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;

import java.io.IOException;

/**
 * A factory for creating FilteringTokenFilters that determine whether or not to
 * accept their underlying token by consulting a script
 */
public class PredicateTokenFilterScriptFactory extends AbstractTokenFilterFactory {

    private final AnalysisPredicateScript.Factory factory;

    public PredicateTokenFilterScriptFactory(IndexSettings indexSettings, String name, Settings settings, ScriptService scriptService) {
        super(indexSettings, name, settings);
        Settings scriptSettings = settings.getAsSettings("script");
        Script script = Script.parse(scriptSettings);
        if (script.getType() != ScriptType.INLINE) {
            throw new IllegalArgumentException("Cannot use stored scripts in tokenfilter [" + name + "]");
        }
        this.factory = scriptService.compile(script, AnalysisPredicateScript.CONTEXT);
    }

    @Override
    public TokenStream create(TokenStream tokenStream) {
        return new ScriptFilteringTokenFilter(tokenStream, factory.newInstance());
    }

    private static class ScriptFilteringTokenFilter extends FilteringTokenFilter {

        final AnalysisPredicateScript script;
        final AnalysisPredicateScript.Token token;

        ScriptFilteringTokenFilter(TokenStream in, AnalysisPredicateScript script) {
            super(in);
            this.script = script;
            this.token = new AnalysisPredicateScript.Token(this);
        }

        @Override
        protected boolean accept() throws IOException {
            token.updatePosition();
            return script.execute(token);
        }
    }
}

@@ -21,12 +21,6 @@ package org.elasticsearch.analysis.common;

 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
-import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
-import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
-import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
-import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
@@ -36,6 +30,7 @@ import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.script.ScriptType;

+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -76,30 +71,26 @@ public class ScriptedConditionTokenFilterFactory extends AbstractTokenFilterFactory {
             }
             return in;
         };
-        AnalysisPredicateScript script = factory.newInstance();
-        final AnalysisPredicateScript.Token token = new AnalysisPredicateScript.Token();
-        return new ConditionalTokenFilter(tokenStream, filter) {
-            CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
-            PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
-            PositionLengthAttribute posLenAtt = addAttribute(PositionLengthAttribute.class);
-            OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
-            TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
-            KeywordAttribute keywordAtt = addAttribute(KeywordAttribute.class);
-            @Override
-            protected boolean shouldFilter() {
-                token.term = termAtt;
-                token.posInc = posIncAtt.getPositionIncrement();
-                token.pos += token.posInc;
-                token.posLen = posLenAtt.getPositionLength();
-                token.startOffset = offsetAtt.startOffset();
-                token.endOffset = offsetAtt.endOffset();
-                token.type = typeAtt.type();
-                token.isKeyword = keywordAtt.isKeyword();
-                return script.execute(token);
-            }
-        };
+        return new ScriptedConditionTokenFilter(tokenStream, filter, factory.newInstance());
+    }
+
+    private static class ScriptedConditionTokenFilter extends ConditionalTokenFilter {
+
+        private final AnalysisPredicateScript script;
+        private final AnalysisPredicateScript.Token token;
+
+        ScriptedConditionTokenFilter(TokenStream input, Function<TokenStream, TokenStream> inputFactory,
+                                     AnalysisPredicateScript script) {
+            super(input, inputFactory);
+            this.script = script;
+            this.token = new AnalysisPredicateScript.Token(this);
+        }
+
+        @Override
+        protected boolean shouldFilter() throws IOException {
+            token.updatePosition();
+            return script.execute(token);
+        }
     }

     @Override

@@ -0,0 +1,89 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.analysis.common;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;

import java.io.IOException;
import java.util.Collections;

public class PredicateTokenScriptFilterTests extends ESTokenStreamTestCase {

    public void testSimpleFilter() throws IOException {
        Settings settings = Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .build();
        Settings indexSettings = Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("index.analysis.filter.f.type", "predicate_token_filter")
            .put("index.analysis.filter.f.script.source", "token.getTerm().length() > 5")
            .put("index.analysis.analyzer.myAnalyzer.type", "custom")
            .put("index.analysis.analyzer.myAnalyzer.tokenizer", "standard")
            .putList("index.analysis.analyzer.myAnalyzer.filter", "f")
            .build();
        IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings);

        AnalysisPredicateScript.Factory factory = () -> new AnalysisPredicateScript() {
            @Override
            public boolean execute(Token token) {
                return token.getTerm().length() > 5;
            }
        };

        @SuppressWarnings("unchecked")
        ScriptService scriptService = new ScriptService(indexSettings, Collections.emptyMap(), Collections.emptyMap()){
            @Override
            public <FactoryType> FactoryType compile(Script script, ScriptContext<FactoryType> context) {
                assertEquals(context, AnalysisPredicateScript.CONTEXT);
                assertEquals(new Script("token.getTerm().length() > 5"), script);
                return (FactoryType) factory;
            }
        };

        CommonAnalysisPlugin plugin = new CommonAnalysisPlugin();
        plugin.createComponents(null, null, null, null, scriptService, null, null, null, null);
        AnalysisModule module
            = new AnalysisModule(TestEnvironment.newEnvironment(settings), Collections.singletonList(plugin));

        IndexAnalyzers analyzers = module.getAnalysisRegistry().build(idxSettings);

        try (NamedAnalyzer analyzer = analyzers.get("myAnalyzer")) {
            assertNotNull(analyzer);
            assertAnalyzesTo(analyzer, "Vorsprung Durch Technik", new String[]{
                "Vorsprung", "Technik"
            });
        }
    }

}

@@ -28,9 +28,44 @@
       - type: condition
         filter: [ "lowercase" ]
         script:
-          source: "token.position > 1 && token.positionIncrement > 0 && token.startOffset > 0 && token.endOffset > 0 && (token.positionLength == 1 || token.type == \"a\" || token.keyword)"
+          source: "token.position >= 1 && token.positionIncrement > 0 && token.startOffset > 0 && token.endOffset > 0 && (token.positionLength == 1 || token.type == \"a\" || token.keyword)"

   - length: { tokens: 3 }
   - match: { tokens.0.token: "Vorsprung" }
   - match: { tokens.1.token: "durch" }
   - match: { tokens.2.token: "technik" }

+---
+"script_filter":
+  - do:
+      indices.analyze:
+        body:
+          text: "Vorsprung Durch Technik"
+          tokenizer: "whitespace"
+          filter:
+            - type: predicate_token_filter
+              script:
+                source: "token.term.length() > 5"
+
+  - length: { tokens: 2 }
+  - match: { tokens.0.token: "Vorsprung" }
+  - match: { tokens.1.token: "Technik" }
+
+---
+"script_filter_position":
+  - do:
+      indices.analyze:
+        body:
+          text: "a b c d e f g h"
+          tokenizer: "whitespace"
+          filter:
+            - type: predicate_token_filter
+              script:
+                source: "token.position >= 4"
+
+  - length: { tokens: 4 }
+  - match: { tokens.0.token: "e" }
+  - match: { tokens.1.token: "f" }
+  - match: { tokens.2.token: "g" }
+  - match: { tokens.3.token: "h" }

@@ -26,6 +26,7 @@ integTestCluster {
   module project.project(':modules:mapper-extras')
   systemProperty 'es.scripting.use_java_time', 'true'
   systemProperty 'es.scripting.update.ctx_in_params', 'false'
+  systemProperty 'es.http.cname_in_publish_address', 'true'
 }

 dependencies {

@@ -32,19 +32,23 @@ esplugin {
 }

 versions << [
-  'aws': '1.11.223'
+  'aws': '1.11.406'
 ]

 dependencies {
   compile "com.amazonaws:aws-java-sdk-s3:${versions.aws}"
   compile "com.amazonaws:aws-java-sdk-kms:${versions.aws}"
   compile "com.amazonaws:aws-java-sdk-core:${versions.aws}"
+  compile "com.amazonaws:jmespath-java:${versions.aws}"
   compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
   compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
   compile "commons-logging:commons-logging:${versions.commonslogging}"
   compile "commons-codec:commons-codec:${versions.commonscodec}"
+  compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
   compile 'com.fasterxml.jackson.core:jackson-databind:2.6.7.1'
   compile 'com.fasterxml.jackson.core:jackson-annotations:2.6.0'
+  compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
+  compile 'joda-time:joda-time:2.10'

   // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here,
   // and whitelist this hack in JarHell
@@ -53,6 +57,7 @@ dependencies {

 dependencyLicenses {
   mapping from: /aws-java-sdk-.*/, to: 'aws-java-sdk'
+  mapping from: /jmespath-java.*/, to: 'aws-java-sdk'
   mapping from: /jackson-.*/, to: 'jackson'
   mapping from: /jaxb-.*/, to: 'jaxb'
 }

@@ -1 +0,0 @@
-c3993cb44f5856fa721b7b7ccfc266377c0bf9c0

@@ -0,0 +1 @@
+43f3b7332d4d527bbf34d4ac6be094f3dabec6de

@@ -1 +0,0 @@
-c24e6ebe108c60a08098aeaad5ae0b6a5a77b618

@@ -0,0 +1 @@
+e29854e58dc20f5453c1da7e580a5921b1e9714a

@@ -1 +0,0 @@
-c2ef96732e22d97952fbcd0a94f1dc376d157eda

@@ -0,0 +1 @@
+5c3c2c57b076602b3aeef841c63e5848ec52b00d

@@ -0,0 +1 @@
+06c291d1029943d4968a36fadffa3b71a6d8b4e4

@@ -23,10 +23,12 @@ import com.amazonaws.ClientConfiguration;
 import com.amazonaws.auth.AWSCredentials;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper;
+import com.amazonaws.client.builder.AwsClientBuilder;
 import com.amazonaws.http.IdleConnectionReaper;
 import com.amazonaws.internal.StaticCredentialsProvider;
 import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3Client;
+import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import com.amazonaws.services.s3.internal.Constants;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.MapBuilder;
@@ -93,19 +95,26 @@ class S3Service extends AbstractComponent implements Closeable {
         }
     }

-    private AmazonS3 buildClient(S3ClientSettings clientSettings) {
-        final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
-        final ClientConfiguration configuration = buildConfiguration(clientSettings);
-        final AmazonS3 client = buildClient(credentials, configuration);
-        if (Strings.hasText(clientSettings.endpoint)) {
-            client.setEndpoint(clientSettings.endpoint);
-        }
-        return client;
-    }
-
     // proxy for testing
-    AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
-        return new AmazonS3Client(credentials, configuration);
+    AmazonS3 buildClient(final S3ClientSettings clientSettings) {
+        final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard();
+        builder.withCredentials(buildCredentials(logger, clientSettings));
+        builder.withClientConfiguration(buildConfiguration(clientSettings));
+
+        final String endpoint = Strings.hasLength(clientSettings.endpoint) ? clientSettings.endpoint : Constants.S3_HOSTNAME;
+        logger.debug("using endpoint [{}]", endpoint);
+
+        // If the endpoint configuration isn't set on the builder then the default behaviour is to try
+        // and work out what region we are in and use an appropriate endpoint - see AwsClientBuilder#setRegion.
+        // In contrast, directly-constructed clients use s3.amazonaws.com unless otherwise instructed. We currently
+        // use a directly-constructed client, and need to keep the existing behaviour to avoid a breaking change,
+        // so to move to using the builder we must set it explicitly to keep the existing behaviour.
+        //
+        // We do this because directly constructing the client is deprecated (was already deprecated in 1.11.223 too)
+        // so this change removes that usage of a deprecated API.
+        builder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, null));
+        return builder.build();
     }

     // pkg private for tests

@@ -19,7 +19,6 @@

 package org.elasticsearch.repositories.s3;

-import com.amazonaws.ClientConfiguration;
 import com.amazonaws.auth.AWSCredentials;
 import com.amazonaws.auth.AWSCredentialsProvider;
 import com.amazonaws.services.s3.AmazonS3;
@@ -70,9 +69,9 @@ public class RepositoryCredentialsTests extends ESTestCase {
         }

         @Override
-        AmazonS3 buildClient(AWSCredentialsProvider credentials, ClientConfiguration configuration) {
-            final AmazonS3 client = super.buildClient(credentials, configuration);
-            return new ClientAndCredentials(client, credentials);
+        AmazonS3 buildClient(final S3ClientSettings clientSettings) {
+            final AmazonS3 client = super.buildClient(clientSettings);
+            return new ClientAndCredentials(client, buildCredentials(logger, clientSettings));
         }

     }

@ -53,9 +53,6 @@ for (Version version : bwcVersions.indexCompatible) {
// some tests rely on the translog not being flushed // some tests rely on the translog not being flushed
setting 'indices.memory.shard_inactive_time', '20m' setting 'indices.memory.shard_inactive_time', '20m'
// debug logging for testRecovery
setting 'logger.level', 'DEBUG'
if (version.onOrAfter('5.3.0')) { if (version.onOrAfter('5.3.0')) {
setting 'http.content_type.required', 'true' setting 'http.content_type.required', 'true'
} }
@ -75,9 +72,6 @@ for (Version version : bwcVersions.indexCompatible) {
// some tests rely on the translog not being flushed // some tests rely on the translog not being flushed
setting 'indices.memory.shard_inactive_time', '20m' setting 'indices.memory.shard_inactive_time', '20m'
// debug logging for testRecovery
setting 'logger.level', 'DEBUG'
numNodes = 2 numNodes = 2
dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir }
cleanShared = false // We want to keep snapshots made by the old cluster! cleanShared = false // We want to keep snapshots made by the old cluster!

View File

@ -68,10 +68,8 @@ import static org.hamcrest.Matchers.notNullValue;
* version is started with the same data directories and then this is rerun * version is started with the same data directories and then this is rerun
* with {@code tests.is_old_cluster} set to {@code false}. * with {@code tests.is_old_cluster} set to {@code false}.
*/ */
public class FullClusterRestartIT extends ESRestTestCase { public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1);
private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
private final boolean supportsLenientBooleans = oldClusterVersion.before(Version.V_6_0_0_alpha1);
private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0");
private String index; private String index;
@ -81,29 +79,9 @@ public class FullClusterRestartIT extends ESRestTestCase {
index = getTestName().toLowerCase(Locale.ROOT); index = getTestName().toLowerCase(Locale.ROOT);
} }
@Override
protected boolean preserveIndicesUponCompletion() {
return true;
}
@Override
protected boolean preserveSnapshotsUponCompletion() {
return true;
}
@Override
protected boolean preserveReposUponCompletion() {
return true;
}
@Override
protected boolean preserveTemplatesUponCompletion() {
return true;
}
public void testSearch() throws Exception { public void testSearch() throws Exception {
int count; int count;
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder(); XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject(); mappingsAndSettings.startObject();
{ {
@ -169,7 +147,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
} }
public void testNewReplicasWork() throws Exception { public void testNewReplicasWork() throws Exception {
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder(); XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject(); mappingsAndSettings.startObject();
{ {
@ -237,10 +215,10 @@ public class FullClusterRestartIT extends ESRestTestCase {
*/ */
public void testAliasWithBadName() throws Exception { public void testAliasWithBadName() throws Exception {
assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before", assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before",
oldClusterVersion.before(VERSION_5_1_0_UNRELEASED)); getOldClusterVersion().before(VERSION_5_1_0_UNRELEASED));
int count; int count;
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder(); XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject(); mappingsAndSettings.startObject();
{ {
@ -291,7 +269,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
Map<String, Object> searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search"))); Map<String, Object> searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search")));
int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp); int totalHits = (int) XContentMapValues.extractValue("hits.total", searchRsp);
assertEquals(count, totalHits); assertEquals(count, totalHits);
if (runningAgainstOldCluster == false) { if (isRunningAgainstOldCluster() == false) {
// We can remove the alias. // We can remove the alias.
Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName)); Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName));
assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals(200, response.getStatusLine().getStatusCode());
@ -302,7 +280,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
} }
public void testClusterState() throws Exception { public void testClusterState() throws Exception {
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder(); XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject(); mappingsAndSettings.startObject();
mappingsAndSettings.field("template", index); mappingsAndSettings.field("template", index);
@ -341,14 +319,14 @@ public class FullClusterRestartIT extends ESRestTestCase {
assertEquals("0", numberOfReplicas); assertEquals("0", numberOfReplicas);
Version version = Version.fromId(Integer.valueOf((String) XContentMapValues.extractValue("metadata.indices." + index + Version version = Version.fromId(Integer.valueOf((String) XContentMapValues.extractValue("metadata.indices." + index +
".settings.index.version.created", clusterState))); ".settings.index.version.created", clusterState)));
assertEquals(oldClusterVersion, version); assertEquals(getOldClusterVersion(), version);
} }
public void testShrink() throws IOException { public void testShrink() throws IOException {
String shrunkenIndex = index + "_shrunk"; String shrunkenIndex = index + "_shrunk";
int numDocs; int numDocs;
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder(); XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject(); mappingsAndSettings.startObject();
{ {
@ -413,7 +391,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
public void testShrinkAfterUpgrade() throws IOException { public void testShrinkAfterUpgrade() throws IOException {
String shrunkenIndex = index + "_shrunk"; String shrunkenIndex = index + "_shrunk";
int numDocs; int numDocs;
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder(); XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject(); mappingsAndSettings.startObject();
{ {
@ -465,7 +443,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
int totalHits = (int) XContentMapValues.extractValue("hits.total", response); int totalHits = (int) XContentMapValues.extractValue("hits.total", response);
assertEquals(numDocs, totalHits); assertEquals(numDocs, totalHits);
if (runningAgainstOldCluster == false) { if (isRunningAgainstOldCluster() == false) {
response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search"))); response = entityAsMap(client().performRequest(new Request("GET", "/" + shrunkenIndex + "/_search")));
assertNoFailures(response); assertNoFailures(response);
totalShards = (int) XContentMapValues.extractValue("_shards.total", response); totalShards = (int) XContentMapValues.extractValue("_shards.total", response);
@ -490,7 +468,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
* </ol> * </ol>
*/ */
public void testRollover() throws IOException { public void testRollover() throws IOException {
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
Request createIndex = new Request("PUT", "/" + index + "-000001"); Request createIndex = new Request("PUT", "/" + index + "-000001");
createIndex.setJsonEntity("{" createIndex.setJsonEntity("{"
+ " \"aliases\": {" + " \"aliases\": {"
@ -511,7 +489,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
bulkRequest.addParameter("refresh", ""); bulkRequest.addParameter("refresh", "");
assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false"));
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover");
rolloverRequest.setJsonEntity("{" rolloverRequest.setJsonEntity("{"
+ " \"conditions\": {" + " \"conditions\": {"
@ -529,7 +507,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
Map<String, Object> count = entityAsMap(client().performRequest(countRequest)); Map<String, Object> count = entityAsMap(client().performRequest(countRequest));
assertNoFailures(count); assertNoFailures(count);
int expectedCount = bulkCount + (runningAgainstOldCluster ? 0 : bulkCount); int expectedCount = bulkCount + (isRunningAgainstOldCluster() ? 0 : bulkCount);
assertEquals(expectedCount, (int) XContentMapValues.extractValue("hits.total", count)); assertEquals(expectedCount, (int) XContentMapValues.extractValue("hits.total", count));
} }
@ -688,7 +666,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
String docLocation = "/" + index + "/doc/1"; String docLocation = "/" + index + "/doc/1";
String doc = "{\"test\": \"test\"}"; String doc = "{\"test\": \"test\"}";
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
Request createDoc = new Request("PUT", docLocation); Request createDoc = new Request("PUT", docLocation);
createDoc.setJsonEntity(doc); createDoc.setJsonEntity(doc);
client().performRequest(createDoc); client().performRequest(createDoc);
@ -703,7 +681,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
public void testEmptyShard() throws IOException { public void testEmptyShard() throws IOException {
final String index = "test_empty_shard"; final String index = "test_empty_shard";
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
Settings.Builder settings = Settings.builder() Settings.Builder settings = Settings.builder()
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
@ -726,7 +704,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
public void testRecovery() throws Exception { public void testRecovery() throws Exception {
int count; int count;
boolean shouldHaveTranslog; boolean shouldHaveTranslog;
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
count = between(200, 300); count = between(200, 300);
/* We've had bugs in the past where we couldn't restore /* We've had bugs in the past where we couldn't restore
* an index without a translog so we randomize whether * an index without a translog so we randomize whether
@ -772,7 +750,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
String countResponse = toStr(client().performRequest(countRequest)); String countResponse = toStr(client().performRequest(countRequest));
assertThat(countResponse, containsString("\"total\":" + count)); assertThat(countResponse, containsString("\"total\":" + count));
if (false == runningAgainstOldCluster) { if (false == isRunningAgainstOldCluster()) {
boolean restoredFromTranslog = false; boolean restoredFromTranslog = false;
boolean foundPrimary = false; boolean foundPrimary = false;
Request recoveryRequest = new Request("GET", "/_cat/recovery/" + index); Request recoveryRequest = new Request("GET", "/_cat/recovery/" + index);
@ -800,7 +778,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog); assertEquals("mismatch while checking for translog recovery\n" + recoveryResponse, shouldHaveTranslog, restoredFromTranslog);
String currentLuceneVersion = Version.CURRENT.luceneVersion.toString(); String currentLuceneVersion = Version.CURRENT.luceneVersion.toString();
String bwcLuceneVersion = oldClusterVersion.luceneVersion.toString(); String bwcLuceneVersion = getOldClusterVersion().luceneVersion.toString();
if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) { if (shouldHaveTranslog && false == currentLuceneVersion.equals(bwcLuceneVersion)) {
int numCurrentVersion = 0; int numCurrentVersion = 0;
int numBwcVersion = 0; int numBwcVersion = 0;
@ -840,7 +818,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
*/ */
public void testSnapshotRestore() throws IOException { public void testSnapshotRestore() throws IOException {
int count; int count;
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
// Create the index // Create the index
count = between(200, 300); count = between(200, 300);
indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject());
@ -860,7 +838,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
// Stick a routing attribute into the cluster settings so we can see it after the restore // Stick a routing attribute into the cluster settings so we can see it after the restore
Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); Request addRoutingSettings = new Request("PUT", "/_cluster/settings");
addRoutingSettings.setJsonEntity( addRoutingSettings.setJsonEntity(
"{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + oldClusterVersion + "\"}}"); "{\"persistent\": {\"cluster.routing.allocation.exclude.test_attr\": \"" + getOldClusterVersion() + "\"}}");
client().performRequest(addRoutingSettings); client().performRequest(addRoutingSettings);
// Stick a template into the cluster so we can see it after the restore // Stick a template into the cluster so we can see it after the restore
@ -885,7 +863,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
templateBuilder.startObject("alias2"); { templateBuilder.startObject("alias2"); {
templateBuilder.startObject("filter"); { templateBuilder.startObject("filter"); {
templateBuilder.startObject("term"); { templateBuilder.startObject("term"); {
templateBuilder.field("version", runningAgainstOldCluster ? oldClusterVersion : Version.CURRENT); templateBuilder.field("version", isRunningAgainstOldCluster() ? getOldClusterVersion() : Version.CURRENT);
} }
templateBuilder.endObject(); templateBuilder.endObject();
} }
@ -898,7 +876,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder));
client().performRequest(createTemplateRequest); client().performRequest(createTemplateRequest);
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
// Create the repo // Create the repo
XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); { XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); {
repoConfig.field("type", "fs"); repoConfig.field("type", "fs");
@ -914,19 +892,19 @@ public class FullClusterRestartIT extends ESRestTestCase {
client().performRequest(createRepoRequest); client().performRequest(createRepoRequest);
} }
Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (runningAgainstOldCluster ? "old_snap" : "new_snap")); Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? "old_snap" : "new_snap"));
createSnapshot.addParameter("wait_for_completion", "true"); createSnapshot.addParameter("wait_for_completion", "true");
createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}");
client().performRequest(createSnapshot); client().performRequest(createSnapshot);
checkSnapshot("old_snap", count, oldClusterVersion); checkSnapshot("old_snap", count, getOldClusterVersion());
if (false == runningAgainstOldCluster) { if (false == isRunningAgainstOldCluster()) {
checkSnapshot("new_snap", count, Version.CURRENT); checkSnapshot("new_snap", count, Version.CURRENT);
} }
} }
public void testHistoryUUIDIsAdded() throws Exception { public void testHistoryUUIDIsAdded() throws Exception {
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder(); XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject(); mappingsAndSettings.startObject();
{ {
@ -1019,20 +997,14 @@ public class FullClusterRestartIT extends ESRestTestCase {
Request clusterSettingsRequest = new Request("GET", "/_cluster/settings"); Request clusterSettingsRequest = new Request("GET", "/_cluster/settings");
clusterSettingsRequest.addParameter("flat_settings", "true"); clusterSettingsRequest.addParameter("flat_settings", "true");
Map<String, Object> clusterSettingsResponse = entityAsMap(client().performRequest(clusterSettingsRequest)); Map<String, Object> clusterSettingsResponse = entityAsMap(client().performRequest(clusterSettingsRequest));
Map<String, Object> expectedClusterSettings = new HashMap<>(); @SuppressWarnings("unchecked") final Map<String, Object> persistentSettings =
expectedClusterSettings.put("transient", emptyMap()); (Map<String, Object>)clusterSettingsResponse.get("persistent");
expectedClusterSettings.put("persistent", assertThat(persistentSettings.get("cluster.routing.allocation.exclude.test_attr"), equalTo(getOldClusterVersion().toString()));
singletonMap("cluster.routing.allocation.exclude.test_attr", oldClusterVersion.toString()));
if (expectedClusterSettings.equals(clusterSettingsResponse) == false) {
NotEqualMessageBuilder builder = new NotEqualMessageBuilder();
builder.compareMaps(clusterSettingsResponse, expectedClusterSettings);
fail("settings don't match:\n" + builder.toString());
}
// Check that the template was restored successfully // Check that the template was restored successfully
Map<String, Object> getTemplateResponse = entityAsMap(client().performRequest(new Request("GET", "/_template/test_template"))); Map<String, Object> getTemplateResponse = entityAsMap(client().performRequest(new Request("GET", "/_template/test_template")));
Map<String, Object> expectedTemplate = new HashMap<>(); Map<String, Object> expectedTemplate = new HashMap<>();
if (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_0_0_beta1)) { if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_6_0_0_beta1)) {
expectedTemplate.put("template", "evil_*"); expectedTemplate.put("template", "evil_*");
} else { } else {
expectedTemplate.put("index_patterns", singletonList("evil_*")); expectedTemplate.put("index_patterns", singletonList("evil_*"));

View File

@ -0,0 +1,101 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.upgrades;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.transport.RemoteClusterService;
import java.io.IOException;
import java.util.Collections;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.transport.RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_SEEDS;
import static org.elasticsearch.transport.RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE;
import static org.hamcrest.Matchers.equalTo;
public class FullClusterRestartSettingsUpgradeIT extends AbstractFullClusterRestartTestCase {
public void testRemoteClusterSettingsUpgraded() throws IOException {
assumeTrue("settings automatically upgraded since 6.5.0", getOldClusterVersion().before(Version.V_6_5_0));
if (isRunningAgainstOldCluster()) {
final Request putSettingsRequest = new Request("PUT", "/_cluster/settings");
try (XContentBuilder builder = jsonBuilder()) {
builder.startObject();
{
builder.startObject("persistent");
{
builder.field("search.remote.foo.skip_unavailable", true);
builder.field("search.remote.foo.seeds", Collections.singletonList("localhost:9200"));
}
builder.endObject();
}
builder.endObject();
putSettingsRequest.setJsonEntity(Strings.toString(builder));
}
client().performRequest(putSettingsRequest);
final Request getSettingsRequest = new Request("GET", "/_cluster/settings");
final Response response = client().performRequest(getSettingsRequest);
try (XContentParser parser = createParser(JsonXContent.jsonXContent, response.getEntity().getContent())) {
final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser);
final Settings settings = clusterGetSettingsResponse.getPersistentSettings();
assertTrue(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings));
assertTrue(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings));
assertTrue(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings));
assertThat(
SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings),
equalTo(Collections.singletonList("localhost:9200")));
}
assertSettingDeprecationsAndWarnings(new Setting<?>[]{
SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo"),
SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo")});
} else {
final Request getSettingsRequest = new Request("GET", "/_cluster/settings");
final Response getSettingsResponse = client().performRequest(getSettingsRequest);
try (XContentParser parser = createParser(JsonXContent.jsonXContent, getSettingsResponse.getEntity().getContent())) {
final ClusterGetSettingsResponse clusterGetSettingsResponse = ClusterGetSettingsResponse.fromXContent(parser);
final Settings settings = clusterGetSettingsResponse.getPersistentSettings();
assertFalse(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings));
assertTrue(
settings.toString(),
RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").exists(settings));
assertTrue(RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE.getConcreteSettingForNamespace("foo").get(settings));
assertFalse(SEARCH_REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings));
assertTrue(RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").exists(settings));
assertThat(
RemoteClusterService.REMOTE_CLUSTERS_SEEDS.getConcreteSettingForNamespace("foo").get(settings),
equalTo(Collections.singletonList("localhost:9200")));
}
}
}
}

View File

@ -20,10 +20,8 @@
package org.elasticsearch.upgrades; package org.elasticsearch.upgrades;
import org.apache.http.util.EntityUtils; import org.apache.http.util.EntityUtils;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.InputStreamStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
@ -48,7 +46,6 @@ import org.elasticsearch.index.query.SpanTermQueryBuilder;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder;
import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.SearchModule;
import org.elasticsearch.test.rest.ESRestTestCase;
import java.io.ByteArrayInputStream; import java.io.ByteArrayInputStream;
import java.io.IOException; import java.io.IOException;
@ -71,7 +68,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
* The queries to test are specified in json format, which turns out to work because we tend to break here rarely. If the * The queries to test are specified in json format, which turns out to work because we tend to break here rarely. If the
* json format of a query being tested here changes then feel free to change this. * json format of a query being tested here changes then feel free to change this.
*/ */
public class QueryBuilderBWCIT extends ESRestTestCase { public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase {
private static final List<Object[]> CANDIDATES = new ArrayList<>(); private static final List<Object[]> CANDIDATES = new ArrayList<>();
@ -145,32 +142,9 @@ public class QueryBuilderBWCIT extends ESRestTestCase {
CANDIDATES.add(new Object[]{"{\"query\": {" + querySource + "}}", expectedQb}); CANDIDATES.add(new Object[]{"{\"query\": {" + querySource + "}}", expectedQb});
} }
private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));
@Override
protected boolean preserveIndicesUponCompletion() {
return true;
}
@Override
protected boolean preserveSnapshotsUponCompletion() {
return true;
}
@Override
protected boolean preserveReposUponCompletion() {
return true;
}
@Override
protected boolean preserveTemplatesUponCompletion() {
return true;
}
public void testQueryBuilderBWC() throws Exception { public void testQueryBuilderBWC() throws Exception {
String index = "queries"; String index = "queries";
if (runningAgainstOldCluster) { if (isRunningAgainstOldCluster()) {
XContentBuilder mappingsAndSettings = jsonBuilder(); XContentBuilder mappingsAndSettings = jsonBuilder();
mappingsAndSettings.startObject(); mappingsAndSettings.startObject();
{ {
@ -230,7 +204,7 @@ public class QueryBuilderBWCIT extends ESRestTestCase {
byte[] qbSource = Base64.getDecoder().decode(queryBuilderStr); byte[] qbSource = Base64.getDecoder().decode(queryBuilderStr);
try (InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length)) { try (InputStream in = new ByteArrayInputStream(qbSource, 0, qbSource.length)) {
try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) { try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in), registry)) {
input.setVersion(oldClusterVersion); input.setVersion(getOldClusterVersion());
QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class); QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class);
assert in.read() == -1; assert in.read() == -1;
assertEquals(expectedQueryBuilder, queryBuilder); assertEquals(expectedQueryBuilder, queryBuilder);

View File

@ -139,12 +139,26 @@ setup:
features: warnings features: warnings
- do: - do:
warnings: warnings:
- 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count]'
search: search:
body: body:
docvalue_fields: [ "count" ] docvalue_fields: [ "count" ]
- match: { hits.hits.0.fields.count: [1] } - match: { hits.hits.0.fields.count: [1] }
---
"multiple docvalue_fields":
- skip:
version: " - 6.3.99"
reason: format option was added in 6.4
features: warnings
- do:
warnings:
- 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count, include.field1.keyword]'
search:
body:
docvalue_fields: [ "count", "include.field1.keyword" ]
- match: { hits.hits.0.fields.count: [1] }
--- ---
"docvalue_fields as url param": "docvalue_fields as url param":
- skip: - skip:
@ -153,7 +167,7 @@ setup:
features: warnings features: warnings
- do: - do:
warnings: warnings:
- 'Doc-value field [count] is not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with the doc value field in order to opt in for the future behaviour and ease the migration to 7.0.' - 'There are doc-value fields which are not using a format. The output will change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass [format=use_field_mapping] with a doc value field in order to opt in for the future behaviour and ease the migration to 7.0: [count]'
search: search:
docvalue_fields: [ "count" ] docvalue_fields: [ "count" ]
- match: { hits.hits.0.fields.count: [1] } - match: { hits.hits.0.fields.count: [1] }

View File

@ -39,6 +39,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -171,6 +172,12 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize"); throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize");
} }
} }
if (IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(metaData.getSettings()) &&
IndexSettings.INDEX_SOFT_DELETES_SETTING.get(metaData.getSettings()) &&
IndexSettings.INDEX_SOFT_DELETES_SETTING.exists(targetIndexSettings) &&
IndexSettings.INDEX_SOFT_DELETES_SETTING.get(targetIndexSettings) == false) {
throw new IllegalArgumentException("Can't disable [index.soft_deletes.enabled] setting on resize");
}
String cause = resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT) + "_index"; String cause = resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT) + "_index";
targetIndex.cause(cause); targetIndex.cause(cause);
Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings); Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings);

View File

@ -22,7 +22,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.replication.ReplicationOperation; import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.action.support.replication.TransportReplicationAction;
@ -171,13 +170,8 @@ public class TransportResyncReplicationAction extends TransportWriteAction<Resyn
@Override @Override
public void handleException(TransportException exp) { public void handleException(TransportException exp) {
final Throwable cause = exp.unwrapCause();
if (TransportActions.isShardNotAvailableException(cause)) {
logger.trace("primary became unavailable during resync, ignoring", exp);
} else {
listener.onFailure(exp); listener.onFailure(exp);
} }
}
}); });
} }

View File

@ -749,7 +749,8 @@ public class MetaDataCreateIndexService extends AbstractComponent {
} }
} else { } else {
final Predicate<String> sourceSettingsPredicate = final Predicate<String> sourceSettingsPredicate =
(s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort.")) (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") ||
s.startsWith("index.sort.") || s.equals("index.soft_deletes.enabled"))
&& indexSettingsBuilder.keys().contains(s) == false; && indexSettingsBuilder.keys().contains(s) == false;
builder.put(sourceMetaData.getSettings().filter(sourceSettingsPredicate)); builder.put(sourceMetaData.getSettings().filter(sourceSettingsPredicate));
} }

View File

@ -27,7 +27,6 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.regex.Regex;
import java.util.AbstractMap;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
@ -54,7 +53,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
private final List<SettingUpdater<?>> settingUpdaters = new CopyOnWriteArrayList<>(); private final List<SettingUpdater<?>> settingUpdaters = new CopyOnWriteArrayList<>();
private final Map<String, Setting<?>> complexMatchers; private final Map<String, Setting<?>> complexMatchers;
private final Map<String, Setting<?>> keySettings; private final Map<String, Setting<?>> keySettings;
private final Map<Setting<?>, Function<Map.Entry<String, String>, Map.Entry<String, String>>> settingUpgraders; private final Map<Setting<?>, SettingUpgrader<?>> settingUpgraders;
private final Setting.Property scope; private final Setting.Property scope;
private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$");
private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$");
@ -70,12 +69,8 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
this.settingUpgraders = this.settingUpgraders =
Collections.unmodifiableMap( Collections.unmodifiableMap(
settingUpgraders settingUpgraders.stream().collect(Collectors.toMap(SettingUpgrader::getSetting, Function.identity())));
.stream()
.collect(
Collectors.toMap(
SettingUpgrader::getSetting,
u -> e -> new AbstractMap.SimpleEntry<>(u.getKey(e.getKey()), u.getValue(e.getValue())))));
this.scope = scope; this.scope = scope;
Map<String, Setting<?>> complexMatchers = new HashMap<>(); Map<String, Setting<?>> complexMatchers = new HashMap<>();
@ -786,15 +781,25 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
boolean changed = false; // track if any settings were upgraded boolean changed = false; // track if any settings were upgraded
for (final String key : settings.keySet()) { for (final String key : settings.keySet()) {
final Setting<?> setting = getRaw(key); final Setting<?> setting = getRaw(key);
final Function<Map.Entry<String, String>, Map.Entry<String, String>> upgrader = settingUpgraders.get(setting); final SettingUpgrader<?> upgrader = settingUpgraders.get(setting);
if (upgrader == null) { if (upgrader == null) {
// the setting does not have an upgrader, copy the setting // the setting does not have an upgrader, copy the setting
builder.copy(key, settings); builder.copy(key, settings);
} else { } else {
// the setting has an upgrader, so mark that we have changed a setting and apply the upgrade logic // the setting has an upgrader, so mark that we have changed a setting and apply the upgrade logic
changed = true; changed = true;
final Map.Entry<String, String> upgrade = upgrader.apply(new Entry(key, settings)); // noinspection ConstantConditions
builder.put(upgrade.getKey(), upgrade.getValue()); if (setting.getConcreteSetting(key).isListSetting()) {
final List<String> value = settings.getAsList(key);
final String upgradedKey = upgrader.getKey(key);
final List<String> upgradedValue = upgrader.getListValue(value);
builder.putList(upgradedKey, upgradedValue);
} else {
final String value = settings.get(key);
final String upgradedKey = upgrader.getKey(key);
final String upgradedValue = upgrader.getValue(value);
builder.put(upgradedKey, upgradedValue);
}
} }
} }
// we only return a new instance if there was an upgrade // we only return a new instance if there was an upgrade

View File

@ -443,6 +443,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING
))); )));
public static List<SettingUpgrader<?>> BUILT_IN_SETTING_UPGRADERS = Collections.emptyList(); public static List<SettingUpgrader<?>> BUILT_IN_SETTING_UPGRADERS = Collections.unmodifiableList(Arrays.asList(
RemoteClusterAware.SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER,
RemoteClusterAware.SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER,
RemoteClusterService.SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER));
} }

View File

@ -345,6 +345,11 @@ public class Setting<T> implements ToXContentObject {
return false; return false;
} }
final boolean isListSetting() {
return this instanceof ListSetting;
}
boolean hasComplexMatcher() { boolean hasComplexMatcher() {
return isGroupSetting(); return isGroupSetting();
} }
@ -453,7 +458,7 @@ public class Setting<T> implements ToXContentObject {
* @return the raw string representation of the setting value * @return the raw string representation of the setting value
*/ */
String innerGetRaw(final Settings settings) { String innerGetRaw(final Settings settings) {
return settings.get(getKey(), defaultValue.apply(settings)); return settings.get(getKey(), defaultValue.apply(settings), isListSetting());
} }
/** Logs a deprecation warning if the setting is deprecated and used. */ /** Logs a deprecation warning if the setting is deprecated and used. */
@ -1305,7 +1310,6 @@ public class Setting<T> implements ToXContentObject {
} }
} }
} }
} }
static void logSettingUpdate(Setting setting, Settings current, Settings previous, Logger logger) { static void logSettingUpdate(Setting setting, Settings current, Settings previous, Logger logger) {

View File

@ -19,6 +19,8 @@
package org.elasticsearch.common.settings; package org.elasticsearch.common.settings;
import java.util.List;
/** /**
* Represents the logic to upgrade a setting. * Represents the logic to upgrade a setting.
* *
@ -51,4 +53,8 @@ public interface SettingUpgrader<T> {
return value; return value;
} }
default List<String> getListValue(final List<String> value) {
return value;
}
} }
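
For context on the interface above, a minimal sketch of a concrete upgrader: it renames a hypothetical setting key and passes the value through via the default getValue/getListValue (hypothetical keys, not one of the built-in upgraders):

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.SettingUpgrader;

public final class RenameSettingUpgraderDemo implements SettingUpgrader<String> {

    // the old setting whose presence triggers the upgrade; hypothetical key
    private static final Setting<String> OLD_SETTING =
            Setting.simpleString("demo.old.key", Property.NodeScope, Property.Deprecated);

    @Override
    public Setting<String> getSetting() {
        return OLD_SETTING;
    }

    @Override
    public String getKey(final String key) {
        return "demo.new.key"; // move the value under the new key
    }

    // getValue and getListValue keep their defaults: the value itself is unchanged
}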

View File

@ -245,6 +245,30 @@ public final class Settings implements ToXContentFragment {
return retVal == null ? defaultValue : retVal; return retVal == null ? defaultValue : retVal;
} }
/**
* Returns the setting value associated with the setting key. If it does not exist,
* returns the default value provided.
*/
String get(String setting, String defaultValue, boolean isList) {
Object value = settings.get(setting);
if (value != null) {
if (value instanceof List) {
if (isList == false) {
throw new IllegalArgumentException(
"Found list type value for setting [" + setting + "] but but did not expect a list for it."
);
}
} else if (isList) {
throw new IllegalArgumentException(
"Expected list type value for setting [" + setting + "] but found [" + value.getClass() + ']'
);
}
return toString(value);
} else {
return defaultValue;
}
}
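
Since the new get overload is package-private, here is a self-contained re-implementation of just the type guard it adds, in plain Java rather than the Settings class itself:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

public class SettingTypeGuardDemo {

    static String get(final Map<String, Object> settings, final String key, final String defaultValue, final boolean isList) {
        final Object value = settings.get(key);
        if (value == null) {
            return defaultValue;
        }
        if (value instanceof List && isList == false) {
            throw new IllegalArgumentException("Found list type value for setting [" + key + "] but did not expect a list for it.");
        }
        if (isList && (value instanceof List) == false) {
            throw new IllegalArgumentException("Expected list type value for setting [" + key + "] but found [" + value.getClass() + "]");
        }
        return String.valueOf(value);
    }

    public static void main(String[] args) {
        final Map<String, Object> settings = Collections.singletonMap("seeds", Arrays.asList("a:9300", "b:9300"));
        System.out.println(get(settings, "seeds", "", true)); // [a:9300, b:9300]
        // get(settings, "seeds", "", false) would throw: found a list where a scalar was expected
    }
}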
/** /**
* Returns the setting value (as float) associated with the setting key. If it does not exist, * Returns the setting value (as float) associated with the setting key. If it does not exist,
* returns the default value provided. * returns the default value provided.

View File

@ -21,6 +21,7 @@ package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.SuppressForbidden;
import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutionException;
@ -30,8 +31,14 @@ import java.util.concurrent.TimeoutException;
public class FutureUtils { public class FutureUtils {
/**
* Cancel execution of this future without interrupting a running thread. See {@link Future#cancel(boolean)} for details.
*
* @param toCancel the future to cancel
* @return false if the future could not be cancelled, otherwise true
*/
@SuppressForbidden(reason = "Future#cancel()") @SuppressForbidden(reason = "Future#cancel()")
public static boolean cancel(Future<?> toCancel) { public static boolean cancel(@Nullable final Future<?> toCancel) {
if (toCancel != null) { if (toCancel != null) {
return toCancel.cancel(false); // this method is a forbidden API since it interrupts threads return toCancel.cancel(false); // this method is a forbidden API since it interrupts threads
} }
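
A short usage sketch of the contract the new Javadoc spells out, using a plain executor (FutureUtils itself simply null-checks and delegates to Future#cancel(false)):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public class CancelWithoutInterruptDemo {
    public static void main(String[] args) throws Exception {
        final ExecutorService executor = Executors.newSingleThreadExecutor();
        final Future<?> future = executor.submit(() -> {
            try {
                TimeUnit.SECONDS.sleep(1); // cancel(false) never interrupts a task that is already running
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        // mirrors FutureUtils.cancel: null-safe, and cancels without interruption
        final boolean cancelled = future != null && future.cancel(false);
        System.out.println("cancelled=" + cancelled);
        executor.shutdown();
    }
}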

View File

@ -131,7 +131,7 @@ public class DiscoveryModule {
if (discoverySupplier == null) { if (discoverySupplier == null) {
throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]");
} }
Loggers.getLogger(getClass(), settings).info("using discovery type [{}]", discoveryType); Loggers.getLogger(getClass(), settings).info("using discovery type [{}] and host providers {}", discoveryType, hostsProviderNames);
discovery = Objects.requireNonNull(discoverySupplier.get()); discovery = Objects.requireNonNull(discoverySupplier.get());
} }

View File

@ -19,24 +19,46 @@
package org.elasticsearch.http; package org.elasticsearch.http;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException; import java.io.IOException;
import static org.elasticsearch.common.Booleans.parseBoolean;
public class HttpInfo implements Writeable, ToXContentFragment { public class HttpInfo implements Writeable, ToXContentFragment {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(LogManager.getLogger(HttpInfo.class));
/** Whether to add the hostname to the publish host field when serializing. */
private static final boolean CNAME_IN_PUBLISH_HOST =
parseBoolean(System.getProperty("es.http.cname_in_publish_address"), false);
private final BoundTransportAddress address; private final BoundTransportAddress address;
private final long maxContentLength; private final long maxContentLength;
private final boolean cnameInPublishHost;
public HttpInfo(StreamInput in) throws IOException { public HttpInfo(StreamInput in) throws IOException {
address = BoundTransportAddress.readBoundTransportAddress(in); this(BoundTransportAddress.readBoundTransportAddress(in), in.readLong(), CNAME_IN_PUBLISH_HOST);
maxContentLength = in.readLong(); }
public HttpInfo(BoundTransportAddress address, long maxContentLength) {
this(address, maxContentLength, CNAME_IN_PUBLISH_HOST);
}
HttpInfo(BoundTransportAddress address, long maxContentLength, boolean cnameInPublishHost) {
this.address = address;
this.maxContentLength = maxContentLength;
this.cnameInPublishHost = cnameInPublishHost;
} }
@Override @Override
@ -45,11 +67,6 @@ public class HttpInfo implements Writeable, ToXContentFragment {
out.writeLong(maxContentLength); out.writeLong(maxContentLength);
} }
public HttpInfo(BoundTransportAddress address, long maxContentLength) {
this.address = address;
this.maxContentLength = maxContentLength;
}
static final class Fields { static final class Fields {
static final String HTTP = "http"; static final String HTTP = "http";
static final String BOUND_ADDRESS = "bound_address"; static final String BOUND_ADDRESS = "bound_address";
@ -62,7 +79,21 @@ public class HttpInfo implements Writeable, ToXContentFragment {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.HTTP); builder.startObject(Fields.HTTP);
builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses()); builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString()); TransportAddress publishAddress = address.publishAddress();
String publishAddressString = publishAddress.toString();
String hostString = publishAddress.address().getHostString();
if (InetAddresses.isInetAddress(hostString) == false) {
if (cnameInPublishHost) {
publishAddressString = hostString + '/' + publishAddress.toString();
} else {
DEPRECATION_LOGGER.deprecated(
"[http.publish_host] was printed as [ip:port] instead of [hostname/ip:port]. "
+ "This format is deprecated and will change to [hostname/ip:port] in a future version. "
+ "Use -Des.http.cname_in_publish_address=true to enforce non-deprecated formatting."
);
}
}
builder.field(Fields.PUBLISH_ADDRESS, publishAddressString);
builder.humanReadableField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength()); builder.humanReadableField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength());
builder.endObject(); builder.endObject();
return builder; return builder;
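
To make the two formats concrete, a minimal sketch of the publish-address string this method ends up emitting (hypothetical host and port; the real values come from the BoundTransportAddress):

public class PublishAddressFormatDemo {
    public static void main(String[] args) {
        final String hostString = "es-node-1";     // a CNAME, i.e. not an IP literal
        final String ipAndPort = "10.0.0.5:9200";  // TransportAddress#toString() style
        final boolean cnameInPublishHost =
                Boolean.parseBoolean(System.getProperty("es.http.cname_in_publish_address", "false"));
        // deprecated default: "10.0.0.5:9200"; opt-in (and future default): "es-node-1/10.0.0.5:9200"
        final String publishAddress = cnameInPublishHost ? hostString + '/' + ipAndPort : ipAndPort;
        System.out.println("publish_address: " + publishAddress);
    }
}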

View File

@ -661,7 +661,7 @@ public abstract class Engine implements Closeable {
} }
/** get commits stats for the last commit */ /** get commits stats for the last commit */
public CommitStats commitStats() { public final CommitStats commitStats() {
return new CommitStats(getLastCommittedSegmentInfos()); return new CommitStats(getLastCommittedSegmentInfos());
} }
@ -678,12 +678,6 @@ public abstract class Engine implements Closeable {
*/ */
public abstract void waitForOpsToComplete(long seqNo) throws InterruptedException; public abstract void waitForOpsToComplete(long seqNo) throws InterruptedException;
/**
* Reset the local checkpoint in the tracker to the given local checkpoint
* @param localCheckpoint the new checkpoint to be set
*/
public abstract void resetLocalCheckpoint(long localCheckpoint);
/** /**
* @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint * @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint
*/ */
@ -951,7 +945,9 @@ public abstract class Engine implements Closeable {
* *
* @return the commit Id for the resulting commit * @return the commit Id for the resulting commit
*/ */
public abstract CommitId flush() throws EngineException; public final CommitId flush() throws EngineException {
return flush(false, false);
}
/** /**
@ -1163,11 +1159,16 @@ public abstract class Engine implements Closeable {
PRIMARY, PRIMARY,
REPLICA, REPLICA,
PEER_RECOVERY, PEER_RECOVERY,
LOCAL_TRANSLOG_RECOVERY; LOCAL_TRANSLOG_RECOVERY,
LOCAL_RESET;
public boolean isRecovery() { public boolean isRecovery() {
return this == PEER_RECOVERY || this == LOCAL_TRANSLOG_RECOVERY; return this == PEER_RECOVERY || this == LOCAL_TRANSLOG_RECOVERY;
} }
boolean isFromTranslog() {
return this == LOCAL_TRANSLOG_RECOVERY || this == LOCAL_RESET;
}
} }
public Origin origin() { public Origin origin() {
@ -1593,7 +1594,7 @@ public abstract class Engine implements Closeable {
private final CheckedRunnable<IOException> onClose; private final CheckedRunnable<IOException> onClose;
private final IndexCommit indexCommit; private final IndexCommit indexCommit;
IndexCommitRef(IndexCommit indexCommit, CheckedRunnable<IOException> onClose) { public IndexCommitRef(IndexCommit indexCommit, CheckedRunnable<IOException> onClose) {
this.indexCommit = indexCommit; this.indexCommit = indexCommit;
this.onClose = onClose; this.onClose = onClose;
} }

View File

@ -21,6 +21,7 @@ package org.elasticsearch.index.engine;
/** /**
* Simple Engine Factory * Simple Engine Factory
*/ */
@FunctionalInterface
public interface EngineFactory { public interface EngineFactory {
Engine newReadWriteEngine(EngineConfig config); Engine newReadWriteEngine(EngineConfig config);

View File

@ -152,12 +152,6 @@ public class InternalEngine extends Engine {
private final SoftDeletesPolicy softDeletesPolicy; private final SoftDeletesPolicy softDeletesPolicy;
private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener; private final LastRefreshedCheckpointListener lastRefreshedCheckpointListener;
/**
* How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this
* across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
* being indexed/deleted.
*/
private final AtomicLong writingBytes = new AtomicLong();
private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false); private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false);
@Nullable @Nullable
@ -530,7 +524,7 @@ public class InternalEngine extends Engine {
/** Returns how many bytes we are currently moving from indexing buffer to segments on disk */ /** Returns how many bytes we are currently moving from indexing buffer to segments on disk */
@Override @Override
public long getWritingBytes() { public long getWritingBytes() {
return writingBytes.get(); return indexWriter.getFlushingBytes() + versionMap.getRefreshingBytes();
} }
 /**
@@ -735,6 +729,7 @@ public class InternalEngine extends Engine {
     : "version: " + index.version() + " type: " + index.versionType();
     return true;
 case LOCAL_TRANSLOG_RECOVERY:
+case LOCAL_RESET:
     assert index.isRetry();
     return true; // allow to optimize in order to update the max safe time stamp
 default:
@@ -833,7 +828,7 @@ public class InternalEngine extends Engine {
 indexResult = new IndexResult(
         plan.versionForIndexing, getPrimaryTerm(), plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
 }
-if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
+if (index.origin().isFromTranslog() == false) {
     final Translog.Location location;
     if (indexResult.getResultType() == Result.Type.SUCCESS) {
         location = translog.add(new Translog.Index(index, indexResult));
@@ -1173,7 +1168,7 @@ public class InternalEngine extends Engine {
 deleteResult = new DeleteResult(
         plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false);
 }
-if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
+if (delete.origin().isFromTranslog() == false) {
     final Translog.Location location;
     if (deleteResult.getResultType() == Result.Type.SUCCESS) {
         location = translog.add(new Translog.Delete(delete, deleteResult));
@@ -1411,7 +1406,7 @@ public class InternalEngine extends Engine {
 }
 }
 final NoOpResult noOpResult = failure != null ? new NoOpResult(getPrimaryTerm(), noOp.seqNo(), failure) : new NoOpResult(getPrimaryTerm(), noOp.seqNo());
-if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
+if (noOp.origin().isFromTranslog() == false) {
     final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason()));
     noOpResult.setTranslogLocation(location);
 }
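All three call sites above now share a single predicate instead of comparing against LOCAL_TRANSLOG_RECOVERY directly. The Origin enum body is not part of this diff, but the helper plausibly looks like this (a sketch; the point is that both translog-sourced origins must skip writing operations back to the translog):

    public enum Origin {
        PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY, LOCAL_RESET;

        public boolean isFromTranslog() {
            // operations replayed from an existing translog must not be re-added to it
            return this == LOCAL_TRANSLOG_RECOVERY || this == LOCAL_RESET;
        }
    }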
@@ -1437,9 +1432,6 @@ public class InternalEngine extends Engine {
 // pass the new reader reference to the external reader manager.
 final long localCheckpointBeforeRefresh = getLocalCheckpoint();
-// this will also cause version map ram to be freed hence we always account for it.
-final long bytes = indexWriter.ramBytesUsed() + versionMap.ramBytesUsedForRefresh();
-writingBytes.addAndGet(bytes);
 try (ReleasableLock lock = readLock.acquire()) {
     ensureOpen();
     if (store.tryIncRef()) {
@@ -1465,8 +1457,6 @@ public class InternalEngine extends Engine {
         e.addSuppressed(inner);
     }
     throw new RefreshFailedEngineException(shardId, e);
-} finally {
-    writingBytes.addAndGet(-bytes);
 }
 assert lastRefreshedCheckpoint() >= localCheckpointBeforeRefresh : "refresh checkpoint was not advanced; " +
         "local_checkpoint=" + localCheckpointBeforeRefresh + " refresh_checkpoint=" + lastRefreshedCheckpoint();
@@ -1576,11 +1566,6 @@ public class InternalEngine extends Engine {
         || localCheckpointTracker.getCheckpoint() == localCheckpointTracker.getMaxSeqNo();
 }
-@Override
-public CommitId flush() throws EngineException {
-    return flush(false, false);
-}
 @Override
 public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
     ensureOpen();
@@ -2340,11 +2325,6 @@ public class InternalEngine extends Engine {
     localCheckpointTracker.waitForOpsToComplete(seqNo);
 }
-@Override
-public void resetLocalCheckpoint(long localCheckpoint) {
-    localCheckpointTracker.resetCheckpoint(localCheckpoint);
-}
 @Override
 public SeqNoStats getSeqNoStats(long globalCheckpoint) {
     return localCheckpointTracker.getStats(globalCheckpoint);
@@ -434,6 +434,14 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
     return maps.current.ramBytesUsed.get();
 }
+/**
+ * Returns how much RAM is currently being freed up by refreshing. This is {@link #ramBytesUsed()}
+ * except it does not include tombstones, because they don't clear on refresh.
+ */
+long getRefreshingBytes() {
+    return maps.old.ramBytesUsed.get();
+}
 @Override
 public Collection<Accountable> getChildResources() {
     // TODO: useful to break down RAM usage here?
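A toy model of the accounting behind getRefreshingBytes(), assuming the two-map scheme implied by maps.current and maps.old above (the real class tracks far more state; this only shows why the old map's bytes count as "refreshing"):

    import java.util.concurrent.atomic.AtomicLong;

    final class TwoMapAccounting {
        final AtomicLong currentBytes = new AtomicLong(); // maps.current: still accumulating
        final AtomicLong oldBytes = new AtomicLong();     // maps.old: being refreshed away

        void beforeRefresh() { oldBytes.set(currentBytes.getAndSet(0)); } // current becomes old
        void afterRefresh() { oldBytes.set(0); }                          // old is freed
        long refreshingBytes() { return oldBytes.get(); }                 // what getRefreshingBytes() reports
    }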
@@ -0,0 +1,368 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.engine;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ReferenceManager;
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogStats;
import java.io.Closeable;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.stream.Stream;
/**
* A basic read-only engine that allows switching a shard to be true read-only temporarily or permanently.
* Note: this engine can be opened side-by-side with a read-write engine but will not reflect any changes made to the read-write
* engine.
*
* @see #ReadOnlyEngine(EngineConfig, SeqNoStats, TranslogStats, boolean, Function)
*/
public final class ReadOnlyEngine extends Engine {
private final SegmentInfos lastCommittedSegmentInfos;
private final SeqNoStats seqNoStats;
private final TranslogStats translogStats;
private final SearcherManager searcherManager;
private final IndexCommit indexCommit;
private final Lock indexWriterLock;
/**
* Creates a new ReadOnlyEngine. This ctor can also be used to open a read-only engine on top of an already opened
* read-write engine. It allows to optionally obtain the writer locks for the shard which would time-out if another
* engine is still open.
*
* @param config the engine configuration
* @param seqNoStats sequence number statistics for this engine or null if not provided
* @param translogStats translog stats for this engine or null if not provided
* @param obtainLock if <code>true</code> this engine will try to obtain the {@link IndexWriter#WRITE_LOCK_NAME} lock. Otherwise
* the lock won't be obtained
* @param readerWrapperFunction allows to wrap the index-reader for this engine.
*/
public ReadOnlyEngine(EngineConfig config, SeqNoStats seqNoStats, TranslogStats translogStats, boolean obtainLock,
Function<DirectoryReader, DirectoryReader> readerWrapperFunction) {
super(config);
try {
Store store = config.getStore();
store.incRef();
DirectoryReader reader = null;
Directory directory = store.directory();
Lock indexWriterLock = null;
boolean success = false;
try {
// we obtain the IW lock even though we never modify the index.
// yet this makes sure nobody else does. including some testing tools that try to be messy
indexWriterLock = obtainLock ? directory.obtainLock(IndexWriter.WRITE_LOCK_NAME) : null;
this.lastCommittedSegmentInfos = Lucene.readSegmentInfos(directory);
this.translogStats = translogStats == null ? new TranslogStats(0, 0, 0, 0, 0) : translogStats;
this.seqNoStats = seqNoStats == null ? buildSeqNoStats(lastCommittedSegmentInfos) : seqNoStats;
reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), config.getShardId());
if (config.getIndexSettings().isSoftDeleteEnabled()) {
reader = new SoftDeletesDirectoryReaderWrapper(reader, Lucene.SOFT_DELETES_FIELD);
}
reader = readerWrapperFunction.apply(reader);
this.indexCommit = reader.getIndexCommit();
this.searcherManager = new SearcherManager(reader,
new RamAccountingSearcherFactory(engineConfig.getCircuitBreakerService()));
this.indexWriterLock = indexWriterLock;
success = true;
} finally {
if (success == false) {
IOUtils.close(reader, indexWriterLock, store::decRef);
}
}
} catch (IOException e) {
throw new UncheckedIOException(e); // this is stupid
}
}
@Override
protected void closeNoLock(String reason, CountDownLatch closedLatch) {
if (isClosed.compareAndSet(false, true)) {
try {
IOUtils.close(searcherManager, indexWriterLock, store::decRef);
} catch (Exception ex) {
logger.warn("failed to close searcher", ex);
} finally {
closedLatch.countDown();
}
}
}
public static SeqNoStats buildSeqNoStats(SegmentInfos infos) {
final SequenceNumbers.CommitInfo seqNoStats =
SequenceNumbers.loadSeqNoInfoFromLuceneCommit(infos.userData.entrySet());
long maxSeqNo = seqNoStats.maxSeqNo;
long localCheckpoint = seqNoStats.localCheckpoint;
return new SeqNoStats(maxSeqNo, localCheckpoint, localCheckpoint);
}
@Override
public GetResult get(Get get, BiFunction<String, SearcherScope, Searcher> searcherFactory) throws EngineException {
return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL);
}
@Override
protected ReferenceManager<IndexSearcher> getReferenceManager(SearcherScope scope) {
return searcherManager;
}
@Override
protected SegmentInfos getLastCommittedSegmentInfos() {
return lastCommittedSegmentInfos;
}
@Override
public String getHistoryUUID() {
return lastCommittedSegmentInfos.userData.get(Engine.HISTORY_UUID_KEY);
}
@Override
public long getWritingBytes() {
return 0;
}
@Override
public long getIndexThrottleTimeInMillis() {
return 0;
}
@Override
public boolean isThrottled() {
return false;
}
@Override
public IndexResult index(Index index) {
assert false : "this should not be called";
throw new UnsupportedOperationException("indexing is not supported on a read-only engine");
}
@Override
public DeleteResult delete(Delete delete) {
assert false : "this should not be called";
throw new UnsupportedOperationException("deletes are not supported on a read-only engine");
}
@Override
public NoOpResult noOp(NoOp noOp) {
assert false : "this should not be called";
throw new UnsupportedOperationException("no-ops are not supported on a read-only engine");
}
@Override
public boolean isTranslogSyncNeeded() {
return false;
}
@Override
public boolean ensureTranslogSynced(Stream<Translog.Location> locations) {
return false;
}
@Override
public void syncTranslog() {
}
@Override
public Closeable acquireRetentionLockForPeerRecovery() {
return () -> {};
}
@Override
public Translog.Snapshot newChangesSnapshot(String source, MapperService mapperService, long fromSeqNo, long toSeqNo,
boolean requiredFullRange) throws IOException {
return readHistoryOperations(source, mapperService, fromSeqNo);
}
@Override
public Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
return new Translog.Snapshot() {
@Override
public void close() { }
@Override
public int totalOperations() {
return 0;
}
@Override
public Translog.Operation next() {
return null;
}
};
}
@Override
public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
return 0;
}
@Override
public boolean hasCompleteOperationHistory(String source, MapperService mapperService, long startingSeqNo) throws IOException {
return false;
}
@Override
public TranslogStats getTranslogStats() {
return translogStats;
}
@Override
public Translog.Location getTranslogLastWriteLocation() {
return new Translog.Location(0,0,0);
}
@Override
public long getLocalCheckpoint() {
return seqNoStats.getLocalCheckpoint();
}
@Override
public void waitForOpsToComplete(long seqNo) {
}
@Override
public SeqNoStats getSeqNoStats(long globalCheckpoint) {
return new SeqNoStats(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint(), globalCheckpoint);
}
@Override
public long getLastSyncedGlobalCheckpoint() {
return seqNoStats.getGlobalCheckpoint();
}
@Override
public long getIndexBufferRAMBytesUsed() {
return 0;
}
@Override
public List<Segment> segments(boolean verbose) {
return Arrays.asList(getSegmentInfo(lastCommittedSegmentInfos, verbose));
}
@Override
public void refresh(String source) {
// we could allow refreshes if we want down the road the searcher manager will then reflect changes to a rw-engine
// opened side-by-side
}
@Override
public void writeIndexingBuffer() throws EngineException {
}
@Override
public boolean shouldPeriodicallyFlush() {
return false;
}
@Override
public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) {
// we can't do synced flushes this would require an indexWriter which we don't have
throw new UnsupportedOperationException("syncedFlush is not supported on a read-only engine");
}
@Override
public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
return new CommitId(lastCommittedSegmentInfos.getId());
}
@Override
public void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes,
boolean upgrade, boolean upgradeOnlyAncientSegments) {
}
@Override
public IndexCommitRef acquireLastIndexCommit(boolean flushFirst) {
store.incRef();
return new IndexCommitRef(indexCommit, store::decRef);
}
@Override
public IndexCommitRef acquireSafeIndexCommit() {
return acquireLastIndexCommit(false);
}
@Override
public void activateThrottling() {
}
@Override
public void deactivateThrottling() {
}
@Override
public void trimUnreferencedTranslogFiles() {
}
@Override
public boolean shouldRollTranslogGeneration() {
return false;
}
@Override
public void rollTranslogGeneration() {
}
@Override
public void restoreLocalCheckpointFromTranslog() {
}
@Override
public int fillSeqNoGaps(long primaryTerm) {
return 0;
}
@Override
public Engine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) {
return this;
}
@Override
public void skipTranslogRecovery() {
}
@Override
public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) {
}
@Override
public void maybePruneDeletes() {
}
}
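A hypothetical opening of this engine, following the constructor contract documented above: passing null for both stats derives the sequence-number stats from the last commit and reports empty translog stats, and true obtains the IndexWriter write lock so no other engine can open the same index:

    Engine readOnly = new ReadOnlyEngine(engineConfig, null, null, true, Function.identity());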
@@ -111,17 +111,6 @@ public abstract class MappedFieldType extends FieldType {
 public boolean equals(Object o) {
     if (!super.equals(o)) return false;
     MappedFieldType fieldType = (MappedFieldType) o;
-    // check similarity first because we need to check the name, and it might be null
-    // TODO: SimilarityProvider should have equals?
-    if (similarity == null || fieldType.similarity == null) {
-        if (similarity != fieldType.similarity) {
-            return false;
-        }
-    } else {
-        if (Objects.equals(similarity.name(), fieldType.similarity.name()) == false) {
-            return false;
-        }
-    }
     return boost == fieldType.boost &&
         docValues == fieldType.docValues &&
@@ -131,7 +120,8 @@ public abstract class MappedFieldType extends FieldType {
         Objects.equals(searchQuoteAnalyzer(), fieldType.searchQuoteAnalyzer()) &&
         Objects.equals(eagerGlobalOrdinals, fieldType.eagerGlobalOrdinals) &&
         Objects.equals(nullValue, fieldType.nullValue) &&
-        Objects.equals(nullValueAsString, fieldType.nullValueAsString);
+        Objects.equals(nullValueAsString, fieldType.nullValueAsString) &&
+        Objects.equals(similarity, fieldType.similarity);
 }
 @Override
@@ -24,7 +24,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.search.function.ScoreFunction;
 import org.elasticsearch.common.lucene.search.function.WeightFactorFunction;
-import org.elasticsearch.common.xcontent.ToXContent.Params;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.QueryShardContext;
@@ -46,7 +45,7 @@ public abstract class ScoreFunctionBuilder<FB extends ScoreFunctionBuilder<FB>>
 * Read from a stream.
 */
 public ScoreFunctionBuilder(StreamInput in) throws IOException {
-    weight = in.readOptionalFloat();
+    weight = checkWeight(in.readOptionalFloat());
 }
 @Override
@@ -70,10 +69,17 @@ public abstract class ScoreFunctionBuilder<FB extends ScoreFunctionBuilder<FB>>
 */
 @SuppressWarnings("unchecked")
 public final FB setWeight(float weight) {
-    this.weight = weight;
+    this.weight = checkWeight(weight);
     return (FB) this;
 }
+private Float checkWeight(Float weight) {
+    if (weight != null && Float.compare(weight, 0) < 0) {
+        throw new IllegalArgumentException("[weight] cannot be negative for a filtering function");
+    }
+    return weight;
+}
 /**
 * The weight applied to the function before combining.
 */
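Effect of the new validation, using WeightBuilder (an existing concrete ScoreFunctionBuilder) for illustration:

    new WeightBuilder().setWeight(0.5f);  // accepted, as before
    new WeightBuilder().setWeight(-1.0f); // now throws IllegalArgumentException:
                                          // [weight] cannot be negative for a filtering function

The same check guards the stream constructor, so a negative weight can no longer arrive over the wire unnoticed.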
@@ -109,6 +109,7 @@ public class LocalCheckpointTracker {
 * @param checkpoint the local checkpoint to reset this tracker to
 */
 public synchronized void resetCheckpoint(final long checkpoint) {
+    // TODO: remove this method after we restore the local history on promotion
     assert checkpoint != SequenceNumbers.UNASSIGNED_SEQ_NO;
     assert checkpoint <= this.checkpoint;
     processedSeqNo.clear();
@@ -91,5 +91,4 @@ public class SeqNoStats implements ToXContentFragment, Writeable {
     ", globalCheckpoint=" + globalCheckpoint +
     '}';
 }
 }
@@ -51,6 +51,4 @@ public abstract class AbstractIndexShardComponent implements IndexShardComponent {
 public String nodeName() {
     return indexSettings.getNodeName();
 }
 }
@@ -21,13 +21,19 @@ package org.elasticsearch.index.shard;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.FutureUtils;
 import java.io.Closeable;
 import java.io.IOException;
-import java.util.ArrayList;
+import java.util.LinkedHashMap;
-import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.Executor;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED;
 import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
@@ -45,22 +51,24 @@ public class GlobalCheckpointListeners implements Closeable {
 public interface GlobalCheckpointListener {
 /**
  * Callback when the global checkpoint is updated or the shard is closed. If the shard is closed, the value of the global checkpoint
- * will be set to {@link org.elasticsearch.index.seqno.SequenceNumbers#UNASSIGNED_SEQ_NO} and the exception will be non-null. If the
- * global checkpoint is updated, the exception will be null.
+ * will be set to {@link org.elasticsearch.index.seqno.SequenceNumbers#UNASSIGNED_SEQ_NO} and the exception will be non-null and an
+ * instance of {@link IndexShardClosedException}. If the listener timed out waiting for notification then the exception will be
+ * non-null and an instance of {@link TimeoutException}. If the global checkpoint is updated, the exception will be null.
  *
  * @param globalCheckpoint the updated global checkpoint
- * @param e if non-null, the shard is closed
+ * @param e if non-null, the shard is closed or the listener timed out
  */
-void accept(long globalCheckpoint, IndexShardClosedException e);
+void accept(long globalCheckpoint, Exception e);
 }
 // guarded by this
 private boolean closed;
-private volatile List<GlobalCheckpointListener> listeners;
+private Map<GlobalCheckpointListener, ScheduledFuture<?>> listeners;
 private long lastKnownGlobalCheckpoint = UNASSIGNED_SEQ_NO;
 private final ShardId shardId;
 private final Executor executor;
+private final ScheduledExecutorService scheduler;
 private final Logger logger;
 /**
@@ -68,15 +76,18 @@ public class GlobalCheckpointListeners implements Closeable {
  *
  * @param shardId the shard ID on which global checkpoint updates can be listened to
  * @param executor the executor for listener notifications
+ * @param scheduler the executor used for scheduling timeouts
  * @param logger a shard-level logger
  */
 GlobalCheckpointListeners(
         final ShardId shardId,
         final Executor executor,
+        final ScheduledExecutorService scheduler,
         final Logger logger) {
-    this.shardId = Objects.requireNonNull(shardId);
-    this.executor = Objects.requireNonNull(executor);
-    this.logger = Objects.requireNonNull(logger);
+    this.shardId = Objects.requireNonNull(shardId, "shardId");
+    this.executor = Objects.requireNonNull(executor, "executor");
+    this.scheduler = Objects.requireNonNull(scheduler, "scheduler");
+    this.logger = Objects.requireNonNull(logger, "logger");
 }
 /**
@@ -84,12 +95,15 @@ public class GlobalCheckpointListeners implements Closeable {
  * listener will be asynchronously notified on the executor used to construct this collection of global checkpoint listeners. If the
  * shard is closed then the listener will be asynchronously notified on the executor used to construct this collection of global
  * checkpoint listeners. The listener will only be notified of at most one event, either the global checkpoint is updated or the shard
- * is closed. A listener must re-register after one of these events to receive subsequent events.
+ * is closed. A listener must re-register after one of these events to receive subsequent events. Callers may add a timeout to be
+ * notified if the timeout elapses. In this case, the listener will be notified with a {@link TimeoutException}. Passing null for
+ * the timeout means no timeout will be associated with the listener.
  *
  * @param currentGlobalCheckpoint the current global checkpoint known to the listener
  * @param listener the listener
+ * @param timeout the listener timeout, or null if no timeout
  */
-synchronized void add(final long currentGlobalCheckpoint, final GlobalCheckpointListener listener) {
+synchronized void add(final long currentGlobalCheckpoint, final GlobalCheckpointListener listener, final TimeValue timeout) {
 if (closed) {
     executor.execute(() -> notifyListener(listener, UNASSIGNED_SEQ_NO, new IndexShardClosedException(shardId)));
     return;
@@ -97,12 +111,43 @@ public class GlobalCheckpointListeners implements Closeable {
 if (lastKnownGlobalCheckpoint > currentGlobalCheckpoint) {
     // notify directly
     executor.execute(() -> notifyListener(listener, lastKnownGlobalCheckpoint, null));
+    return;
 } else {
     if (listeners == null) {
-        listeners = new ArrayList<>();
+        listeners = new LinkedHashMap<>();
     }
+    if (timeout == null) {
+        listeners.put(listener, null);
+    } else {
+        listeners.put(
+                listener,
+                scheduler.schedule(
+                        () -> {
+                            final boolean removed;
+                            synchronized (this) {
+                                /*
+                                 * Note that the listeners map can be null if a notification nulled out the map reference when
+                                 * notifying listeners, and then our scheduled execution occurred before we could be cancelled by
+                                 * the notification. In this case, we would have blocked waiting for access to this critical
+                                 * section.
+                                 *
+                                 * What is more, we know that this listener has a timeout associated with it (otherwise we would
+                                 * not be here) so the return value from remove being null is an indication that we are not in the
+                                 * map. This can happen if a notification nulled out the listeners, and then our scheduled execution
+                                 * occurred before we could be cancelled by the notification, and then another thread added a
+                                 * listener causing the listeners map reference to be non-null again. In this case, our listener
+                                 * here would not be in the map and we should not fire the timeout logic.
+                                 */
+                                removed = listeners != null && listeners.remove(listener) != null;
+                            }
+                            if (removed) {
+                                final TimeoutException e = new TimeoutException(timeout.getStringRep());
+                                logger.trace("global checkpoint listener timed out", e);
+                                executor.execute(() -> notifyListener(listener, UNASSIGNED_SEQ_NO, e));
+                            }
+                        },
+                        timeout.nanos(),
+                        TimeUnit.NANOSECONDS));
+    }
-    listeners.add(listener);
 }
 }
@@ -112,10 +157,25 @@ public class GlobalCheckpointListeners implements Closeable {
     notifyListeners(UNASSIGNED_SEQ_NO, new IndexShardClosedException(shardId));
 }
+/**
+ * The number of listeners currently pending for notification.
+ *
+ * @return the number of listeners pending notification
+ */
 synchronized int pendingListeners() {
     return listeners == null ? 0 : listeners.size();
 }
+/**
+ * The scheduled future for a listener that has a timeout associated with it, otherwise null.
+ *
+ * @param listener the listener to get the scheduled future for
+ * @return a scheduled future representing the timeout future for the listener, otherwise null
+ */
+synchronized ScheduledFuture<?> getTimeoutFuture(final GlobalCheckpointListener listener) {
+    return listeners.get(listener);
+}
 /**
  * Invoke to notify all registered listeners of an updated global checkpoint.
  *
@@ -135,19 +195,24 @@ public class GlobalCheckpointListeners implements Closeable {
 assert (globalCheckpoint == UNASSIGNED_SEQ_NO && e != null) || (globalCheckpoint >= NO_OPS_PERFORMED && e == null);
 if (listeners != null) {
     // capture the current listeners
-    final List<GlobalCheckpointListener> currentListeners = listeners;
+    final Map<GlobalCheckpointListener, ScheduledFuture<?>> currentListeners = listeners;
     listeners = null;
     if (currentListeners != null) {
         executor.execute(() -> {
-            for (final GlobalCheckpointListener listener : currentListeners) {
-                notifyListener(listener, globalCheckpoint, e);
+            for (final Map.Entry<GlobalCheckpointListener, ScheduledFuture<?>> listener : currentListeners.entrySet()) {
+                /*
+                 * We do not want to interrupt any timeouts that fired, these will detect that the listener has been notified and
+                 * not trigger the timeout.
+                 */
+                FutureUtils.cancel(listener.getValue());
+                notifyListener(listener.getKey(), globalCheckpoint, e);
             }
         });
     }
 }
 }
-private void notifyListener(final GlobalCheckpointListener listener, final long globalCheckpoint, final IndexShardClosedException e) {
+private void notifyListener(final GlobalCheckpointListener listener, final long globalCheckpoint, final Exception e) {
 try {
     listener.accept(globalCheckpoint, e);
 } catch (final Exception caught) {
@@ -157,8 +222,11 @@ public class GlobalCheckpointListeners implements Closeable {
         "error notifying global checkpoint listener of updated global checkpoint [{}]",
         globalCheckpoint),
         caught);
-} else {
+} else if (e instanceof IndexShardClosedException) {
     logger.warn("error notifying global checkpoint listener of closed shard", caught);
+} else {
+    assert e instanceof TimeoutException : e;
+    logger.warn("error notifying global checkpoint listener of timeout", caught);
 }
 }
 }
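For orientation, a sketch of registering a listener with the new timeout parameter (values illustrative):

    globalCheckpointListeners.add(
            currentGlobalCheckpoint,
            (globalCheckpoint, e) -> {
                if (e instanceof TimeoutException) {
                    // timed out; globalCheckpoint is UNASSIGNED_SEQ_NO here
                } else if (e instanceof IndexShardClosedException) {
                    // shard closed before an update arrived
                } else {
                    // notified with the updated global checkpoint
                }
            },
            TimeValue.timeValueSeconds(30)); // pass null for no timeout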
@@ -163,7 +163,6 @@ import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 import static org.elasticsearch.index.mapper.SourceToParse.source;
-import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED;
 import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
 public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
@@ -303,7 +302,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 this.checkIndexOnStartup = indexSettings.getValue(IndexSettings.INDEX_CHECK_ON_STARTUP);
 this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, bigArrays);
 final String aId = shardRouting.allocationId().getId();
-this.globalCheckpointListeners = new GlobalCheckpointListeners(shardId, threadPool.executor(ThreadPool.Names.LISTENER), logger);
+this.globalCheckpointListeners =
+        new GlobalCheckpointListeners(shardId, threadPool.executor(ThreadPool.Names.LISTENER), threadPool.scheduler(), logger);
 this.replicationTracker =
         new ReplicationTracker(shardId, aId, indexSettings, UNASSIGNED_SEQ_NO, globalCheckpointListeners::globalCheckpointUpdated);
@@ -1273,16 +1273,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
     return result;
 }
-// package-private for testing
-int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot) throws IOException {
-    recoveryState.getTranslog().totalOperations(snapshot.totalOperations());
-    recoveryState.getTranslog().totalOperationsOnStart(snapshot.totalOperations());
+/**
+ * Replays translog operations from the provided translog {@code snapshot} to the current engine using the given {@code origin}.
+ * The callback {@code onOperationRecovered} is notified after each translog operation is replayed successfully.
+ */
+int runTranslogRecovery(Engine engine, Translog.Snapshot snapshot, Engine.Operation.Origin origin,
+                        Runnable onOperationRecovered) throws IOException {
 int opsRecovered = 0;
 Translog.Operation operation;
 while ((operation = snapshot.next()) != null) {
     try {
         logger.trace("[translog] recover op {}", operation);
-        Engine.Result result = applyTranslogOperation(operation, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY);
+        Engine.Result result = applyTranslogOperation(operation, origin);
         switch (result.getResultType()) {
             case FAILURE:
                 throw result.getFailure();
@@ -1295,7 +1297,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 }
 opsRecovered++;
-recoveryState.getTranslog().incrementRecoveredOperations();
+onOperationRecovered.run();
 } catch (Exception e) {
     if (ExceptionsHelper.status(e) == RestStatus.BAD_REQUEST) {
         // mainly for MapperParsingException and Failure to detect xcontent
@@ -1313,8 +1315,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
  * Operations from the translog will be replayed to bring lucene up to date.
  **/
 public void openEngineAndRecoverFromTranslog() throws IOException {
+    final RecoveryState.Translog translogRecoveryStats = recoveryState.getTranslog();
+    final Engine.TranslogRecoveryRunner translogRecoveryRunner = (engine, snapshot) -> {
+        translogRecoveryStats.totalOperations(snapshot.totalOperations());
+        translogRecoveryStats.totalOperationsOnStart(snapshot.totalOperations());
+        return runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY,
+                translogRecoveryStats::incrementRecoveredOperations);
+    };
     innerOpenEngineAndTranslog();
-    getEngine().recoverFromTranslog(this::runTranslogRecovery, Long.MAX_VALUE);
+    getEngine().recoverFromTranslog(translogRecoveryRunner, Long.MAX_VALUE);
 }
 /**
@@ -1352,11 +1361,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
 final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
 replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint");
-assertMaxUnsafeAutoIdInCommit();
-final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID);
-store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated());
+trimUnsafeCommits();
 createNewEngine(config);
 verifyNotClosed();
@@ -1367,6 +1372,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage();
 }
+private void trimUnsafeCommits() throws IOException {
+    assert currentEngineReference.get() == null : "engine is running";
+    final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY);
+    final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
+    final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID);
+    assertMaxUnsafeAutoIdInCommit();
+    store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, indexSettings.getIndexVersionCreated());
+}
 private boolean assertSequenceNumbersInCommit() throws IOException {
 final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData();
 assert userData.containsKey(SequenceNumbers.LOCAL_CHECKPOINT_KEY) : "commit point doesn't contains a local checkpoint";
@@ -1463,7 +1477,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 if (origin == Engine.Operation.Origin.PRIMARY) {
     assert assertPrimaryMode();
 } else {
-    assert origin == Engine.Operation.Origin.REPLICA;
+    assert origin == Engine.Operation.Origin.REPLICA || origin == Engine.Operation.Origin.LOCAL_RESET;
     assert assertReplicationTarget();
 }
 if (writeAllowedStates.contains(state) == false) {
@@ -1768,15 +1782,18 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 /**
  * Add a global checkpoint listener. If the global checkpoint is above the current global checkpoint known to the listener then the
- * listener will fire immediately on the calling thread.
+ * listener will fire immediately on the calling thread. If the specified timeout elapses before the listener is notified, the listener
+ * will be notified with a {@link TimeoutException}. A caller may pass null to specify no timeout.
  *
  * @param currentGlobalCheckpoint the current global checkpoint known to the listener
  * @param listener the listener
+ * @param timeout the timeout
  */
 public void addGlobalCheckpointListener(
         final long currentGlobalCheckpoint,
-        final GlobalCheckpointListeners.GlobalCheckpointListener listener) {
-    this.globalCheckpointListeners.add(currentGlobalCheckpoint, listener);
+        final GlobalCheckpointListeners.GlobalCheckpointListener listener,
+        final TimeValue timeout) {
+    this.globalCheckpointListeners.add(currentGlobalCheckpoint, listener, timeout);
 }
 /**
@@ -2166,9 +2183,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 private Engine createNewEngine(EngineConfig config) {
 synchronized (mutex) {
-    if (state == IndexShardState.CLOSED) {
-        throw new AlreadyClosedException(shardId + " can't create engine - shard is closed");
-    }
+    verifyNotClosed();
     assert this.currentEngineReference.get() == null;
     Engine engine = newEngine(config);
     onNewEngine(engine); // call this before we pass the memory barrier otherwise actions that happen
@@ -2314,19 +2329,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 bumpPrimaryTerm(opPrimaryTerm, () -> {
     updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition");
     final long currentGlobalCheckpoint = getGlobalCheckpoint();
-    final long localCheckpoint;
-    if (currentGlobalCheckpoint == UNASSIGNED_SEQ_NO) {
-        localCheckpoint = NO_OPS_PERFORMED;
+    final long maxSeqNo = seqNoStats().getMaxSeqNo();
+    logger.info("detected new primary with primary term [{}], global checkpoint [{}], max_seq_no [{}]",
+            opPrimaryTerm, currentGlobalCheckpoint, maxSeqNo);
+    if (currentGlobalCheckpoint < maxSeqNo) {
+        resetEngineToGlobalCheckpoint();
     } else {
-        localCheckpoint = currentGlobalCheckpoint;
-    }
-    logger.trace(
-            "detected new primary with primary term [{}], resetting local checkpoint from [{}] to [{}]",
-            opPrimaryTerm,
-            getLocalCheckpoint(),
-            localCheckpoint);
-    getEngine().resetLocalCheckpoint(localCheckpoint);
         getEngine().rollTranslogGeneration();
+    }
 });
 }
 }
@@ -2687,4 +2697,26 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
 }
 };
 }
+/**
+ * Rollback the current engine to the safe commit, then replay local translog up to the global checkpoint.
+ */
+void resetEngineToGlobalCheckpoint() throws IOException {
+    assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]";
+    sync(); // persist the global checkpoint to disk
+    final long globalCheckpoint = getGlobalCheckpoint();
+    final Engine newEngine;
+    synchronized (mutex) {
+        verifyNotClosed();
+        IOUtils.close(currentEngineReference.getAndSet(null));
+        trimUnsafeCommits();
+        newEngine = createNewEngine(newEngineConfig());
+        active.set(true);
+    }
+    final Engine.TranslogRecoveryRunner translogRunner = (engine, snapshot) -> runTranslogRecovery(
+            engine, snapshot, Engine.Operation.Origin.LOCAL_RESET, () -> {
+                // TODO: add a dedicated recovery stats for the reset translog
+            });
+    newEngine.recoverFromTranslog(translogRunner, globalCheckpoint);
+}
 }
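For reference, the runner contract used by resetEngineToGlobalCheckpoint() above is just a (engine, snapshot) -> int callback. A sketch of a variant that counts replayed operations (illustrative; runTranslogRecovery is package-private, so this only compiles inside IndexShard):

    final AtomicInteger replayed = new AtomicInteger();
    final Engine.TranslogRecoveryRunner runner = (engine, snapshot) ->
        runTranslogRecovery(engine, snapshot, Engine.Operation.Origin.LOCAL_RESET,
            replayed::incrementAndGet); // count ops instead of updating recovery stats
    newEngine.recoverFromTranslog(runner, globalCheckpoint);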
@@ -21,6 +21,8 @@ package org.elasticsearch.index.similarity;
 import org.apache.lucene.search.similarities.Similarity;
+import java.util.Objects;
 /**
  * Wrapper around a {@link Similarity} and its name.
  */
@@ -48,4 +50,28 @@ public final class SimilarityProvider {
     return similarity;
 }
+@Override
+public boolean equals(Object o) {
+    if (this == o) return true;
+    if (o == null || getClass() != o.getClass()) return false;
+    SimilarityProvider that = (SimilarityProvider) o;
+    /**
+     * We check <code>name</code> only because the <code>similarity</code> is
+     * re-created for each new instance and they don't implement equals.
+     * This is not entirely correct, but we only use equality checks
+     * for similarities inside the same index and names are unique in this case.
+     **/
+    return Objects.equals(name, that.name);
+}
+@Override
+public int hashCode() {
+    /**
+     * We use <code>name</code> only because the <code>similarity</code> is
+     * re-created for each new instance and they don't implement equals.
+     * This is not entirely correct, but we only use equality checks
+     * for similarities within a single index and names are unique in this case.
+     **/
+    return Objects.hash(name);
+}
 }
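A consequence of name-only equality, assuming the existing SimilarityProvider(String, Similarity) constructor (values illustrative):

    SimilarityProvider a = new SimilarityProvider("my_sim", new BM25Similarity());
    SimilarityProvider b = new SimilarityProvider("my_sim", new BM25Similarity(1.5f, 0.5f));
    assert a.equals(b); // equal by name alone, even though the Similarity instances differ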
@@ -1439,11 +1439,28 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
 */
 public void bootstrapNewHistory() throws IOException {
 metadataLock.writeLock().lock();
-try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
-    final Map<String, String> userData = getUserData(writer);
+try {
+    Map<String, String> userData = readLastCommittedSegmentsInfo().getUserData();
     final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO));
+    bootstrapNewHistory(maxSeqNo);
+} finally {
+    metadataLock.writeLock().unlock();
+}
+}
+/**
+ * Marks an existing lucene index with a new history uuid and sets the given maxSeqNo as the local checkpoint
+ * as well as the maximum sequence number.
+ * This is used to make sure no existing shard will recover from this index using ops based recovery.
+ * @see SequenceNumbers#LOCAL_CHECKPOINT_KEY
+ * @see SequenceNumbers#MAX_SEQ_NO
+ */
+public void bootstrapNewHistory(long maxSeqNo) throws IOException {
+metadataLock.writeLock().lock();
+try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) {
     final Map<String, String> map = new HashMap<>();
     map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
+    map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo));
     map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo));
     updateCommitData(writer, map);
 } finally {
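The two entry points now relate as follows (sketch): the no-arg overload reads maxSeqNo from the last commit and delegates, while the new overload lets callers stamp an explicit sequence number:

    store.bootstrapNewHistory();         // maxSeqNo taken from the last commit's user data
    store.bootstrapNewHistory(maxSeqNo); // explicit maxSeqNo written as both local checkpoint and max_seq_no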
@@ -396,7 +396,6 @@ public class IndicesService extends AbstractLifecycleComponent
 public IndexService indexService(Index index) {
     return indices.get(index.getUUID());
 }
-
 /**
  * Returns an IndexService for the specified index if exists otherwise a {@link IndexNotFoundException} is thrown.
  */
@@ -0,0 +1,167 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories;
import org.apache.lucene.index.IndexCommit;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import java.io.IOException;
import java.util.List;
public class FilterRepository implements Repository {
private final Repository in;
public FilterRepository(Repository in) {
this.in = in;
}
@Override
public RepositoryMetaData getMetadata() {
return in.getMetadata();
}
@Override
public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) {
return in.getSnapshotInfo(snapshotId);
}
@Override
public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) {
return in.getSnapshotGlobalMetaData(snapshotId);
}
@Override
public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException {
return in.getSnapshotIndexMetaData(snapshotId, index);
}
@Override
public RepositoryData getRepositoryData() {
return in.getRepositoryData();
}
@Override
public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData metaData) {
in.initializeSnapshot(snapshotId, indices, metaData);
}
@Override
public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long startTime, String failure, int totalShards,
List<SnapshotShardFailure> shardFailures, long repositoryStateId, boolean includeGlobalState) {
return in.finalizeSnapshot(snapshotId, indices, startTime, failure, totalShards, shardFailures, repositoryStateId,
includeGlobalState);
}
@Override
public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
in.deleteSnapshot(snapshotId, repositoryStateId);
}
@Override
public long getSnapshotThrottleTimeInNanos() {
return in.getSnapshotThrottleTimeInNanos();
}
@Override
public long getRestoreThrottleTimeInNanos() {
return in.getRestoreThrottleTimeInNanos();
}
@Override
public String startVerification() {
return in.startVerification();
}
@Override
public void endVerification(String verificationToken) {
in.endVerification(verificationToken);
}
@Override
public void verify(String verificationToken, DiscoveryNode localNode) {
in.verify(verificationToken, localNode);
}
@Override
public boolean isReadOnly() {
return in.isReadOnly();
}
@Override
public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
IndexShardSnapshotStatus snapshotStatus) {
in.snapshotShard(shard, store, snapshotId, indexId, snapshotIndexCommit, snapshotStatus);
}
@Override
public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
RecoveryState recoveryState) {
in.restoreShard(shard, snapshotId, version, indexId, snapshotShardId, recoveryState);
}
@Override
public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) {
return in.getShardSnapshotStatus(snapshotId, version, indexId, shardId);
}
@Override
public Lifecycle.State lifecycleState() {
return in.lifecycleState();
}
@Override
public void addLifecycleListener(LifecycleListener listener) {
in.addLifecycleListener(listener);
}
@Override
public void removeLifecycleListener(LifecycleListener listener) {
in.removeLifecycleListener(listener);
}
@Override
public void start() {
in.start();
}
@Override
public void stop() {
in.stop();
}
@Override
public void close() {
in.close();
}
}
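A sketch of the intended extension point: a hypothetical subclass overrides only the methods it cares about and inherits delegation for the rest:

    public class LoggingRepository extends FilterRepository {

        public LoggingRepository(Repository in) {
            super(in);
        }

        @Override
        public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
            // observe the call, then delegate to the wrapped repository
            System.out.println("deleting snapshot " + snapshotId);
            super.deleteSnapshot(snapshotId, repositoryStateId);
        }
    }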
@@ -398,7 +398,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterStateApplier {
     "repository type [" + repositoryMetaData.type() + "] does not exist");
 }
 try {
-    Repository repository = factory.create(repositoryMetaData);
+    Repository repository = factory.create(repositoryMetaData, typesRegistry::get);
     repository.start();
     return repository;
 } catch (Exception e) {
@@ -28,6 +28,7 @@ import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
+import org.elasticsearch.index.store.Store;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.snapshots.SnapshotInfo;
@@ -35,6 +36,7 @@ import org.elasticsearch.snapshots.SnapshotShardFailure;
 import java.io.IOException;
 import java.util.List;
+import java.util.function.Function;
 /**
  * An interface for interacting with a repository in snapshot and restore.
@@ -46,7 +48,7 @@ import java.util.List;
 * <ul>
 * <li>Master calls {@link #initializeSnapshot(SnapshotId, List, org.elasticsearch.cluster.metadata.MetaData)}
 * with list of indices that will be included into the snapshot</li>
- * <li>Data nodes call {@link Repository#snapshotShard(IndexShard, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)}
+ * <li>Data nodes call {@link Repository#snapshotShard(IndexShard, Store, SnapshotId, IndexId, IndexCommit, IndexShardSnapshotStatus)}
 * for each shard</li>
 * <li>When all shard calls return master calls {@link #finalizeSnapshot} with possible list of failures</li>
 * </ul>
@@ -63,6 +65,10 @@ public interface Repository extends LifecycleComponent {
 * @param metadata metadata for the repository including name and settings
 */
 Repository create(RepositoryMetaData metadata) throws Exception;
+default Repository create(RepositoryMetaData metaData, Function<String, Repository.Factory> typeLookup) throws Exception {
+    return create(metaData);
+}
 }
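A sketch of a factory that uses the new lookup to wrap another registered repository type (names illustrative; the "fs" type is assumed to be registered):

    Repository.Factory wrappingFactory = new Repository.Factory() {
        @Override
        public Repository create(RepositoryMetaData metadata) throws Exception {
            throw new UnsupportedOperationException("requires the type lookup");
        }

        @Override
        public Repository create(RepositoryMetaData metadata, Function<String, Repository.Factory> typeLookup) throws Exception {
            Repository delegate = typeLookup.apply("fs").create(metadata);
            return new FilterRepository(delegate);
        }
    };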
 /**
@@ -188,14 +194,15 @@ public interface Repository extends LifecycleComponent {
 * <p>
 * As snapshot process progresses, implementation of this method should update {@link IndexShardSnapshotStatus} object and check
 * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted.
- *
 * @param shard shard to be snapshotted
+ * @param store store to be snapshotted
 * @param snapshotId snapshot id
 * @param indexId id for the index being snapshotted
 * @param snapshotIndexCommit commit point
 * @param snapshotStatus snapshot status
 */
-void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus);
+void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
+                   IndexShardSnapshotStatus snapshotStatus);
 /**
 * Restores snapshot of the shard.
@ -845,8 +845,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
} }
@Override @Override
public void snapshotShard(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, IndexShardSnapshotStatus snapshotStatus) { public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
SnapshotContext snapshotContext = new SnapshotContext(shard, snapshotId, indexId, snapshotStatus, System.currentTimeMillis()); IndexShardSnapshotStatus snapshotStatus) {
SnapshotContext snapshotContext = new SnapshotContext(store, snapshotId, indexId, snapshotStatus, System.currentTimeMillis());
try { try {
snapshotContext.snapshot(snapshotIndexCommit); snapshotContext.snapshot(snapshotIndexCommit);
} catch (Exception e) { } catch (Exception e) {
@ -854,7 +855,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
if (e instanceof IndexShardSnapshotFailedException) { if (e instanceof IndexShardSnapshotFailedException) {
throw (IndexShardSnapshotFailedException) e; throw (IndexShardSnapshotFailedException) e;
} else { } else {
throw new IndexShardSnapshotFailedException(shard.shardId(), e); throw new IndexShardSnapshotFailedException(store.shardId(), e);
} }
} }
} }
@@ -1157,15 +1158,15 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
/** /**
* Constructs new context * Constructs new context
* *
* @param shard shard to be snapshotted * @param store store to be snapshotted
* @param snapshotId snapshot id * @param snapshotId snapshot id
* @param indexId the id of the index being snapshotted * @param indexId the id of the index being snapshotted
* @param snapshotStatus snapshot status to report progress * @param snapshotStatus snapshot status to report progress
*/ */
SnapshotContext(IndexShard shard, SnapshotId snapshotId, IndexId indexId, IndexShardSnapshotStatus snapshotStatus, long startTime) { SnapshotContext(Store store, SnapshotId snapshotId, IndexId indexId, IndexShardSnapshotStatus snapshotStatus, long startTime) {
super(snapshotId, Version.CURRENT, indexId, shard.shardId()); super(snapshotId, Version.CURRENT, indexId, store.shardId());
this.snapshotStatus = snapshotStatus; this.snapshotStatus = snapshotStatus;
this.store = shard.store(); this.store = store;
this.startTime = startTime; this.startTime = startTime;
} }


@@ -46,6 +46,7 @@ import java.util.Comparator;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import java.util.stream.Collectors;
/** /**
* Query sub phase which pulls data from doc values * Query sub phase which pulls data from doc values
@@ -77,6 +78,15 @@ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase {
hits = hits.clone(); // don't modify the incoming hits hits = hits.clone(); // don't modify the incoming hits
Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId));
List<String> noFormatFields = context.docValueFieldsContext().fields().stream().filter(f -> f.format == null).map(f -> f.field)
.collect(Collectors.toList());
if (noFormatFields.isEmpty() == false) {
DEPRECATION_LOGGER.deprecated("There are doc-value fields which are not using a format. The output will "
+ "change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass "
+ "[format={}] with a doc value field in order to opt in for the future behaviour and ease the migration to "
+ "7.0: {}", DocValueFieldsContext.USE_DEFAULT_FORMAT, noFormatFields);
}
for (FieldAndFormat fieldAndFormat : context.docValueFieldsContext().fields()) { for (FieldAndFormat fieldAndFormat : context.docValueFieldsContext().fields()) {
String field = fieldAndFormat.field; String field = fieldAndFormat.field;
MappedFieldType fieldType = context.mapperService().fullName(field); MappedFieldType fieldType = context.mapperService().fullName(field);
@@ -84,10 +94,6 @@ public final class DocValueFieldsFetchSubPhase implements FetchSubPhase {
final IndexFieldData<?> indexFieldData = context.getForField(fieldType); final IndexFieldData<?> indexFieldData = context.getForField(fieldType);
final DocValueFormat format; final DocValueFormat format;
if (fieldAndFormat.format == null) { if (fieldAndFormat.format == null) {
DEPRECATION_LOGGER.deprecated("Doc-value field [" + fieldAndFormat.field + "] is not using a format. The output will " +
"change in 7.0 when doc value fields get formatted based on mappings by default. It is recommended to pass " +
"[format={}] with the doc value field in order to opt in for the future behaviour and ease the migration to " +
"7.0.", DocValueFieldsContext.USE_DEFAULT_FORMAT);
format = null; format = null;
} else { } else {
String formatDesc = fieldAndFormat.format; String formatDesc = fieldAndFormat.format;
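(For illustration: clients can silence the new deprecation warning by attaching an explicit format to each doc-value field. A minimal sketch against SearchSourceBuilder; the index and field names are placeholders.)

    // Opt in to mapping-based formatting, the default behaviour as of 7.0.
    SearchSourceBuilder source = new SearchSourceBuilder()
        .query(QueryBuilders.matchAllQuery())
        .docValueField("my_timestamp", DocValueFieldsContext.USE_DEFAULT_FORMAT);
    SearchRequest request = new SearchRequest("my_index").source(source);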


@@ -389,7 +389,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
try { try {
// we flush first to make sure we get the latest writes snapshotted // we flush first to make sure we get the latest writes snapshotted
try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) { try (Engine.IndexCommitRef snapshotRef = indexShard.acquireLastIndexCommit(true)) {
repository.snapshotShard(indexShard, snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(), snapshotStatus); repository.snapshotShard(indexShard, indexShard.store(), snapshot.getSnapshotId(), indexId, snapshotRef.getIndexCommit(),
snapshotStatus);
if (logger.isDebugEnabled()) { if (logger.isDebugEnabled()) {
final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy(); final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.asCopy();
logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus); logger.debug("snapshot ({}) completed to {} with {}", snapshot, repository, lastSnapshotStatus);


@@ -28,6 +28,7 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.SettingUpgrader;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.TransportAddress;
@@ -51,6 +52,11 @@ import java.util.stream.Stream;
*/ */
public abstract class RemoteClusterAware extends AbstractComponent { public abstract class RemoteClusterAware extends AbstractComponent {
static {
// remove search.remote.* settings in 8.0.0
assert Version.CURRENT.major < 8;
}
public static final Setting.AffixSetting<List<String>> SEARCH_REMOTE_CLUSTERS_SEEDS = public static final Setting.AffixSetting<List<String>> SEARCH_REMOTE_CLUSTERS_SEEDS =
Setting.affixKeySetting( Setting.affixKeySetting(
"search.remote.", "search.remote.",
@@ -66,6 +72,20 @@ public abstract class RemoteClusterAware extends AbstractComponent {
Setting.Property.Dynamic, Setting.Property.Dynamic,
Setting.Property.NodeScope)); Setting.Property.NodeScope));
public static final SettingUpgrader<List<String>> SEARCH_REMOTE_CLUSTER_SEEDS_UPGRADER = new SettingUpgrader<List<String>>() {
@Override
public Setting<List<String>> getSetting() {
return SEARCH_REMOTE_CLUSTERS_SEEDS;
}
@Override
public String getKey(final String key) {
return key.replaceFirst("^search", "cluster");
}
};
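(For illustration: the upgrader rewrites only the leading namespace of the concrete key, so each configured cluster alias carries over unchanged. A small sketch of the mapping; "cluster_one" is a placeholder alias.)

    // The regex is anchored, so only the leading "search" segment is replaced.
    String oldKey = "search.remote.cluster_one.seeds";
    String newKey = oldKey.replaceFirst("^search", "cluster");
    // newKey is now "cluster.remote.cluster_one.seeds"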
/** /**
* A list of initial seed nodes to discover eligible nodes from the remote cluster * A list of initial seed nodes to discover eligible nodes from the remote cluster
*/ */
@@ -105,6 +125,20 @@ public abstract class RemoteClusterAware extends AbstractComponent {
Setting.Property.NodeScope), Setting.Property.NodeScope),
REMOTE_CLUSTERS_SEEDS); REMOTE_CLUSTERS_SEEDS);
public static final SettingUpgrader<String> SEARCH_REMOTE_CLUSTERS_PROXY_UPGRADER = new SettingUpgrader<String>() {
@Override
public Setting<String> getSetting() {
return SEARCH_REMOTE_CLUSTERS_PROXY;
}
@Override
public String getKey(final String key) {
return key.replaceFirst("^search", "cluster");
}
};
/** /**
* A proxy address for the remote cluster. * A proxy address for the remote cluster.
* NOTE: this setting is undocumented until we have at least one transport that supports passing * NOTE: this setting is undocumented until we have at least one transport that supports passing


@@ -19,8 +19,6 @@
package org.elasticsearch.transport; package org.elasticsearch.transport;
import java.util.Collection;
import java.util.function.Supplier;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.OriginalIndices;
@@ -35,6 +33,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.SettingUpgrader;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.CountDown;
@@ -43,6 +42,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
@@ -55,6 +55,7 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction; import java.util.function.BiFunction;
import java.util.function.Function; import java.util.function.Function;
import java.util.function.Predicate; import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.Stream; import java.util.stream.Stream;
@@ -65,6 +66,11 @@ import static org.elasticsearch.common.settings.Setting.boolSetting;
*/ */
public final class RemoteClusterService extends RemoteClusterAware implements Closeable { public final class RemoteClusterService extends RemoteClusterAware implements Closeable {
static {
// remove search.remote.* settings in 8.0.0
assert Version.CURRENT.major < 8;
}
public static final Setting<Integer> SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER = public static final Setting<Integer> SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER =
Setting.intSetting("search.remote.connections_per_cluster", 3, 1, Setting.Property.NodeScope, Setting.Property.Deprecated); Setting.intSetting("search.remote.connections_per_cluster", 3, 1, Setting.Property.NodeScope, Setting.Property.Deprecated);
@@ -132,6 +138,20 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
key -> boolSetting(key, false, Setting.Property.Deprecated, Setting.Property.Dynamic, Setting.Property.NodeScope), key -> boolSetting(key, false, Setting.Property.Deprecated, Setting.Property.Dynamic, Setting.Property.NodeScope),
REMOTE_CLUSTERS_SEEDS); REMOTE_CLUSTERS_SEEDS);
public static final SettingUpgrader<Boolean> SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE_UPGRADER = new SettingUpgrader<Boolean>() {
@Override
public Setting<Boolean> getSetting() {
return SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE;
}
@Override
public String getKey(final String key) {
return key.replaceFirst("^search", "cluster");
}
};
public static final Setting.AffixSetting<Boolean> REMOTE_CLUSTER_SKIP_UNAVAILABLE = public static final Setting.AffixSetting<Boolean> REMOTE_CLUSTER_SKIP_UNAVAILABLE =
Setting.affixKeySetting( Setting.affixKeySetting(
"cluster.remote.", "cluster.remote.",


@@ -47,6 +47,7 @@ import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
import static org.hamcrest.Matchers.equalTo;
public class TransportResizeActionTests extends ESTestCase { public class TransportResizeActionTests extends ESTestCase {
@@ -92,6 +93,16 @@
).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards ")); ).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards "));
IllegalArgumentException softDeletesError = expectThrows(IllegalArgumentException.class, () -> {
ResizeRequest req = new ResizeRequest("target", "source");
req.getTargetIndexRequest().settings(Settings.builder().put("index.soft_deletes.enabled", false));
ClusterState clusterState = createClusterState("source", 8, 1,
Settings.builder().put("index.blocks.write", true).put("index.soft_deletes.enabled", true).build());
TransportResizeAction.prepareCreateIndexRequest(req, clusterState,
(i) -> new DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), "source", "target");
});
assertThat(softDeletesError.getMessage(), equalTo("Can't disable [index.soft_deletes.enabled] setting on resize"));
// create one that won't fail // create one that won't fail
ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0, ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0,
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))


@@ -53,7 +53,6 @@ import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import static java.util.Collections.emptySet; import static java.util.Collections.emptySet;
@@ -113,6 +112,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
@Override @Override
protected Settings nodeSettings(int nodeOrdinal) { protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder() return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
// manual collection or upon cluster forming. // manual collection or upon cluster forming.
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2) .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
.put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s")
@@ -121,8 +121,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
@Override @Override
protected Collection<Class<? extends Plugin>> nodePlugins() { protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(TestPlugin.class, return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class);
MockTransportService.TestPlugin.class);
} }
public void testClusterInfoServiceCollectsInformation() throws Exception { public void testClusterInfoServiceCollectsInformation() throws Exception {
@@ -172,7 +171,7 @@
} }
} }
public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException { public void testClusterInfoServiceInformationClearOnError() {
internalCluster().startNodes(2, internalCluster().startNodes(2,
// manually control publishing // manually control publishing
Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build()); Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build());


@@ -261,6 +261,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
.put("index.version.upgraded", upgraded) .put("index.version.upgraded", upgraded)
.put("index.similarity.default.type", "BM25") .put("index.similarity.default.type", "BM25")
.put("index.analysis.analyzer.default.tokenizer", "keyword") .put("index.analysis.analyzer.default.tokenizer", "keyword")
.put("index.soft_deletes.enabled", "true")
.build(); .build();
runPrepareResizeIndexSettingsTest( runPrepareResizeIndexSettingsTest(
indexSettings, indexSettings,
@@ -277,6 +278,7 @@
assertThat(settings.get("index.allocation.max_retries"), equalTo("1")); assertThat(settings.get("index.allocation.max_retries"), equalTo("1"));
assertThat(settings.getAsVersion("index.version.created", null), equalTo(version)); assertThat(settings.getAsVersion("index.version.created", null), equalTo(version));
assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded)); assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded));
assertThat(settings.get("index.soft_deletes.enabled"), equalTo("true"));
}); });
} }
@@ -337,6 +339,15 @@
} }
public void testDoNotOverrideSoftDeletesSettingOnResize() {
runPrepareResizeIndexSettingsTest(
Settings.builder().put("index.soft_deletes.enabled", "false").build(),
Settings.builder().put("index.soft_deletes.enabled", "true").build(),
Collections.emptyList(),
randomBoolean(),
settings -> assertThat(settings.get("index.soft_deletes.enabled"), equalTo("true")));
}
private void runPrepareResizeIndexSettingsTest( private void runPrepareResizeIndexSettingsTest(
final Settings sourceSettings, final Settings sourceSettings,
final Settings requestSettings, final Settings requestSettings,


@@ -47,6 +47,7 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer; import java.util.function.BiConsumer;
import java.util.function.Consumer; import java.util.function.Consumer;
import java.util.function.Function; import java.util.function.Function;
import java.util.stream.Collectors;
import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.equalTo;
@@ -1171,4 +1172,47 @@
} }
} }
public void testUpgradeListSetting() {
final Setting<List<String>> oldSetting =
Setting.listSetting("foo.old", Collections.emptyList(), Function.identity(), Property.NodeScope);
final Setting<List<String>> newSetting =
Setting.listSetting("foo.new", Collections.emptyList(), Function.identity(), Property.NodeScope);
final AbstractScopedSettings service =
new ClusterSettings(
Settings.EMPTY,
new HashSet<>(Arrays.asList(oldSetting, newSetting)),
Collections.singleton(new SettingUpgrader<List<String>>() {
@Override
public Setting<List<String>> getSetting() {
return oldSetting;
}
@Override
public String getKey(final String key) {
return "foo.new";
}
@Override
public List<String> getListValue(final List<String> value) {
return value.stream().map(s -> "new." + s).collect(Collectors.toList());
}
}));
final int length = randomIntBetween(0, 16);
final List<String> values = length == 0 ? Collections.emptyList() : new ArrayList<>(length);
for (int i = 0; i < length; i++) {
values.add(randomAlphaOfLength(8));
}
final Settings settings = Settings.builder().putList("foo.old", values).build();
final Settings upgradedSettings = service.upgradeSettings(settings);
assertFalse(oldSetting.exists(upgradedSettings));
assertTrue(newSetting.exists(upgradedSettings));
assertThat(
newSetting.get(upgradedSettings),
equalTo(oldSetting.get(settings).stream().map(s -> "new." + s).collect(Collectors.toList())));
}
} }


@@ -180,6 +180,13 @@ public class SettingTests extends ESTestCase {
} }
} }
public void testValidateStringSetting() {
Settings settings = Settings.builder().putList("foo.bar", Arrays.asList("bla-a", "bla-b")).build();
Setting<String> stringSetting = Setting.simpleString("foo.bar", Property.NodeScope);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> stringSetting.get(settings));
assertEquals("Found list type value for setting [foo.bar] but but did not expect a list for it.", e.getMessage());
}
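(For illustration: the same list-valued settings object parses cleanly once the setting is declared as a list. A minimal counterpart to the test above.)

    Settings settings = Settings.builder().putList("foo.bar", Arrays.asList("bla-a", "bla-b")).build();
    Setting<List<String>> listSetting =
        Setting.listSetting("foo.bar", Collections.emptyList(), Function.identity(), Property.NodeScope);
    // A list is expected here, so no IllegalArgumentException is thrown.
    assertEquals(Arrays.asList("bla-a", "bla-b"), listSetting.get(settings));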
private static final Setting<String> FOO_BAR_SETTING = new Setting<>( private static final Setting<String> FOO_BAR_SETTING = new Setting<>(
"foo.bar", "foo.bar",
"foobar", "foobar",

Some files were not shown because too many files have changed in this diff.