Merge remote-tracking branch 'origin/master' into index-lifecycle

Commit 243e863f6e
@@ -350,26 +350,26 @@ and running elasticsearch distributions works correctly on supported operating s
These tests should really only be run in vagrant vms because they're destructive.

. Install Virtual Box and Vagrant.

+
. (Optional) Install https://github.com/fgrehm/vagrant-cachier[vagrant-cachier] to squeeze
a bit more performance out of the process:

+
--------------------------------------
vagrant plugin install vagrant-cachier
--------------------------------------

+
. Validate your installed dependencies:

+
-------------------------------------
./gradlew :qa:vagrant:vagrantCheckVersion
-------------------------------------

+
. Download and smoke test the VMs with `./gradlew vagrantSmokeTest` or
`./gradlew -Pvagrant.boxes=all vagrantSmokeTest`. The first time you run this it will
download the base images and provision the boxes and immediately quit. Downloading all
the images may take a long time. After the images are already on your machine, they won't
be downloaded again unless they have been updated to a new version.

+
. Run the tests with `./gradlew packagingTest`. This will cause Gradle to build
the tar, zip, and deb packages and all the plugins. It will then run the tests
on ubuntu-1404 and centos-7. We chose those two distributions as the default
@@ -35,6 +35,8 @@
<module name="OuterTypeFilename" />
<!-- No line wraps inside of import and package statements. -->
<module name="NoLineWrap" />
<!-- only one statement per line should be allowed -->
<module name="OneStatementPerLine"/>
<!-- Each java file has only one outer class -->
<module name="OneTopLevelClass" />
<!-- The suffix L is preferred, because the letter l (ell) is often
@@ -701,22 +701,4 @@
<suppress files="plugins[/\\]repository-s3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]s3[/\\]MockDefaultS3OutputStream.java" checks="LineLength" />
<suppress files="plugins[/\\]repository-s3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]repositories[/\\]s3[/\\]TestAmazonS3.java" checks="LineLength" />
<suppress files="plugins[/\\]store-smb[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]SmbDirectoryWrapper.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]MockInternalClusterInfoService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]TestShardRouting.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]inject[/\\]ModuleTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]script[/\\]NativeSignificanceScoreScriptWithParams.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]BackgroundIndexer.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]CorruptionUtils.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]ESIntegTestCase.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]IndexSettingsModule.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]InternalTestCluster.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]MockIndexEventListener.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]discovery[/\\]ClusterDiscoveryConfiguration.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]disruption[/\\]IntermittentLongGCDisruption.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]disruption[/\\]SlowClusterStateProcessing.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]AssertingSearcher.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]MockEngineSupport.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchAssertions.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSDirectoryService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSIndexStore.java" checks="LineLength" />
</suppressions>
@@ -48,6 +48,8 @@ import org.elasticsearch.client.ml.PostDataRequest;
import org.elasticsearch.client.ml.PutCalendarRequest;
import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.StartDatafeedRequest;
import org.elasticsearch.client.ml.StopDatafeedRequest;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
@@ -231,6 +233,32 @@ final class MLRequestConverters {
        return request;
    }

    static Request startDatafeed(StartDatafeedRequest startDatafeedRequest) throws IOException {
        String endpoint = new EndpointBuilder()
                .addPathPartAsIs("_xpack")
                .addPathPartAsIs("ml")
                .addPathPartAsIs("datafeeds")
                .addPathPart(startDatafeedRequest.getDatafeedId())
                .addPathPartAsIs("_start")
                .build();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
        request.setEntity(createEntity(startDatafeedRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }

    static Request stopDatafeed(StopDatafeedRequest stopDatafeedRequest) throws IOException {
        String endpoint = new EndpointBuilder()
                .addPathPartAsIs("_xpack")
                .addPathPartAsIs("ml")
                .addPathPartAsIs("datafeeds")
                .addPathPart(Strings.collectionToCommaDelimitedString(stopDatafeedRequest.getDatafeedIds()))
                .addPathPartAsIs("_stop")
                .build();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
        request.setEntity(createEntity(stopDatafeedRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }

    static Request deleteForecast(DeleteForecastRequest deleteForecastRequest) {
        String endpoint = new EndpointBuilder()
                .addPathPartAsIs("_xpack")
@@ -58,6 +58,10 @@ import org.elasticsearch.client.ml.PutDatafeedRequest;
import org.elasticsearch.client.ml.PutDatafeedResponse;
import org.elasticsearch.client.ml.PutJobRequest;
import org.elasticsearch.client.ml.PutJobResponse;
import org.elasticsearch.client.ml.StartDatafeedRequest;
import org.elasticsearch.client.ml.StartDatafeedResponse;
import org.elasticsearch.client.ml.StopDatafeedRequest;
import org.elasticsearch.client.ml.StopDatafeedResponse;
import org.elasticsearch.client.ml.UpdateJobRequest;
import org.elasticsearch.client.ml.job.stats.JobStats;

@@ -565,6 +569,86 @@ public final class MachineLearningClient {
                Collections.emptySet());
    }

    /**
     * Starts the given Machine Learning Datafeed
     * <p>
     * For additional info
     * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html">
     * ML Start Datafeed documentation</a>
     *
     * @param request The request to start the datafeed
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return action acknowledgement
     * @throws IOException when there is a serialization issue sending the request or receiving the response
     */
    public StartDatafeedResponse startDatafeed(StartDatafeedRequest request, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(request,
                MLRequestConverters::startDatafeed,
                options,
                StartDatafeedResponse::fromXContent,
                Collections.emptySet());
    }

    /**
     * Starts the given Machine Learning Datafeed asynchronously and notifies the listener on completion
     * <p>
     * For additional info
     * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-start-datafeed.html">
     * ML Start Datafeed documentation</a>
     *
     * @param request The request to start the datafeed
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener Listener to be notified upon request completion
     */
    public void startDatafeedAsync(StartDatafeedRequest request, RequestOptions options, ActionListener<StartDatafeedResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(request,
                MLRequestConverters::startDatafeed,
                options,
                StartDatafeedResponse::fromXContent,
                listener,
                Collections.emptySet());
    }

    /**
     * Stops the given Machine Learning Datafeed
     * <p>
     * For additional info
     * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html">
     * ML Stop Datafeed documentation</a>
     *
     * @param request The request to stop the datafeed
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return action acknowledgement
     * @throws IOException when there is a serialization issue sending the request or receiving the response
     */
    public StopDatafeedResponse stopDatafeed(StopDatafeedRequest request, RequestOptions options) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(request,
                MLRequestConverters::stopDatafeed,
                options,
                StopDatafeedResponse::fromXContent,
                Collections.emptySet());
    }

    /**
     * Stops the given Machine Learning Datafeed asynchronously and notifies the listener on completion
     * <p>
     * For additional info
     * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-stop-datafeed.html">
     * ML Stop Datafeed documentation</a>
     *
     * @param request The request to stop the datafeed
     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener Listener to be notified upon request completion
     */
    public void stopDatafeedAsync(StopDatafeedRequest request, RequestOptions options, ActionListener<StopDatafeedResponse> listener) {
        restHighLevelClient.performRequestAsyncAndParseEntity(request,
                MLRequestConverters::stopDatafeed,
                options,
                StopDatafeedResponse::fromXContent,
                listener,
                Collections.emptySet());
    }

    /**
     * Updates a Machine Learning {@link org.elasticsearch.client.ml.job.config.Job}
     * <p>
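To show how the new datafeed methods above are intended to be called, here is a brief, hypothetical sketch; the datafeed id and the `client` variable (a RestHighLevelClient exposing machineLearning()) are assumptions, not part of this change:

    // Illustrative only: "datafeed-1" and `client` are assumed to exist already.
    StartDatafeedRequest startRequest = new StartDatafeedRequest("datafeed-1");
    startRequest.setStart("2018-08-01T00:00:00Z");              // inclusive start, ISO 8601
    startRequest.setTimeout(TimeValue.timeValueSeconds(30));    // how long to wait for the cluster
    StartDatafeedResponse startResponse =
            client.machineLearning().startDatafeed(startRequest, RequestOptions.DEFAULT);
    boolean started = startResponse.isStarted();

    StopDatafeedRequest stopRequest = new StopDatafeedRequest("datafeed-1");
    StopDatafeedResponse stopResponse =
            client.machineLearning().stopDatafeed(stopRequest, RequestOptions.DEFAULT);
    boolean stopped = stopResponse.isStopped();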
@@ -473,7 +473,8 @@ final class RequestConverters {
        Params params = new Params(request)
            .withRefresh(reindexRequest.isRefresh())
            .withTimeout(reindexRequest.getTimeout())
            .withWaitForActiveShards(reindexRequest.getWaitForActiveShards());
            .withWaitForActiveShards(reindexRequest.getWaitForActiveShards())
            .withRequestsPerSecond(reindexRequest.getRequestsPerSecond());

        if (reindexRequest.getScrollTime() != null) {
            params.putParam("scroll", reindexRequest.getScrollTime());
@@ -492,6 +493,7 @@ final class RequestConverters {
            .withRefresh(updateByQueryRequest.isRefresh())
            .withTimeout(updateByQueryRequest.getTimeout())
            .withWaitForActiveShards(updateByQueryRequest.getWaitForActiveShards())
            .withRequestsPerSecond(updateByQueryRequest.getRequestsPerSecond())
            .withIndicesOptions(updateByQueryRequest.indicesOptions());
        if (updateByQueryRequest.isAbortOnVersionConflict() == false) {
            params.putParam("conflicts", "proceed");
@@ -518,6 +520,7 @@ final class RequestConverters {
            .withRefresh(deleteByQueryRequest.isRefresh())
            .withTimeout(deleteByQueryRequest.getTimeout())
            .withWaitForActiveShards(deleteByQueryRequest.getWaitForActiveShards())
            .withRequestsPerSecond(deleteByQueryRequest.getRequestsPerSecond())
            .withIndicesOptions(deleteByQueryRequest.indicesOptions());
        if (deleteByQueryRequest.isAbortOnVersionConflict() == false) {
            params.putParam("conflicts", "proceed");
@@ -535,6 +538,17 @@ final class RequestConverters {
        return request;
    }

    static Request rethrottle(RethrottleRequest rethrottleRequest) throws IOException {
        String endpoint = new EndpointBuilder().addPathPart("_reindex").addPathPart(rethrottleRequest.getTaskId().toString())
            .addPathPart("_rethrottle").build();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
        Params params = new Params(request)
            .withRequestsPerSecond(rethrottleRequest.getRequestsPerSecond());
        // we set "group_by" to "none" because this is the response format we can parse back
        params.putParam("group_by", "none");
        return request;
    }

    static Request putScript(PutStoredScriptRequest putStoredScriptRequest) throws IOException {
        String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(putStoredScriptRequest.id()).build();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
@@ -821,6 +835,16 @@ final class RequestConverters {
            return this;
        }

        Params withRequestsPerSecond(float requestsPerSecond) {
            // the default in AbstractBulkByScrollRequest is Float.POSITIVE_INFINITY,
            // but we don't want to add that to the URL parameters, instead we use -1
            if (Float.isFinite(requestsPerSecond)) {
                return putParam(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, Float.toString(requestsPerSecond));
            } else {
                return putParam(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, "-1");
            }
        }

        Params withRetryOnConflict(int retryOnConflict) {
            if (retryOnConflict > 0) {
                return putParam("retry_on_conflict", String.valueOf(retryOnConflict));
@@ -1065,7 +1089,7 @@ final class RequestConverters {
    private static String encodePart(String pathPart) {
        try {
            //encode each part (e.g. index, type and id) separately before merging them into the path
            //we prepend "/" to the path part to make this pate absolute, otherwise there can be issues with
            //we prepend "/" to the path part to make this path absolute, otherwise there can be issues with
            //paths that start with `-` or contain `:`
            URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null);
            //manually encode any slash that each part may contain
@@ -25,6 +25,7 @@ import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
@@ -486,13 +487,14 @@ public class RestHighLevelClient implements Closeable {
     * Asynchronously executes an update by query request.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html">
     * Update By Query API on elastic.co</a>
     * @param updateByQueryRequest the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener the listener to be notified upon request completion
     */
    public final void updateByQueryAsync(UpdateByQueryRequest reindexRequest, RequestOptions options,
    public final void updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, RequestOptions options,
                                         ActionListener<BulkByScrollResponse> listener) {
        performRequestAsyncAndParseEntity(
            reindexRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet()
            updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet()
        );
    }

@@ -515,16 +517,45 @@ public class RestHighLevelClient implements Closeable {
     * Asynchronously executes a delete by query request.
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html">
     * Delete By Query API on elastic.co</a>
     * @param deleteByQueryRequest the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener the listener to be notified upon request completion
     */
    public final void deleteByQueryAsync(DeleteByQueryRequest reindexRequest, RequestOptions options,
    public final void deleteByQueryAsync(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options,
                                         ActionListener<BulkByScrollResponse> listener) {
        performRequestAsyncAndParseEntity(
            reindexRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet()
            deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet()
        );
    }

    /**
     * Executes a reindex rethrottling request.
     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html#docs-reindex-rethrottle">
     * Reindex rethrottling API on elastic.co</a>
     * @param rethrottleRequest the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @return the response
     * @throws IOException in case there is a problem sending the request or parsing back the response
     */
    public final ListTasksResponse reindexRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException {
        return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottle, options, ListTasksResponse::fromXContent,
            emptySet());
    }

    /**
     * Executes a reindex rethrottling request.
     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html#docs-reindex-rethrottle">
     * Reindex rethrottling API on elastic.co</a>
     * @param rethrottleRequest the request
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
     * @param listener the listener to be notified upon request completion
     */
    public final void reindexRethrottleAsync(RethrottleRequest rethrottleRequest, RequestOptions options,
                                             ActionListener<ListTasksResponse> listener) {
        performRequestAsyncAndParseEntity(rethrottleRequest, RequestConverters::rethrottle, options, ListTasksResponse::fromXContent,
            listener, emptySet());
    }

    /**
     * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
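Before the new RethrottleRequest class itself (next hunk), a brief, hypothetical sketch of how the rethrottle methods above can be called; the task id value is an assumption:

    // Slow an in-flight reindex task down to 10 requests per second.
    RethrottleRequest slowDown = new RethrottleRequest(new TaskId("node-1:12345"), 10.0f);
    ListTasksResponse response = client.reindexRethrottle(slowDown, RequestOptions.DEFAULT);

    // Passing only the task id disables throttling; it is sent as requests_per_second=-1 on the wire.
    RethrottleRequest unthrottle = new RethrottleRequest(new TaskId("node-1:12345"));
    client.reindexRethrottle(unthrottle, RequestOptions.DEFAULT);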
@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.elasticsearch.tasks.TaskId;

import java.util.Objects;

/**
 * A request changing throttling of a task.
 */
public class RethrottleRequest implements Validatable {

    static final String REQUEST_PER_SECOND_PARAMETER = "requests_per_second";

    private final TaskId taskId;
    private final float requestsPerSecond;

    /**
     * Create a new {@link RethrottleRequest} which disables any throttling for the given taskId.
     * @param taskId the task for which throttling will be disabled
     */
    public RethrottleRequest(TaskId taskId) {
        this.taskId = taskId;
        this.requestsPerSecond = Float.POSITIVE_INFINITY;
    }

    /**
     * Create a new {@link RethrottleRequest} which changes the throttling for the given taskId.
     * @param taskId the task that throttling changes will be applied to
     * @param requestsPerSecond the number of requests per second that the task should perform. This needs to be a positive value.
     */
    public RethrottleRequest(TaskId taskId, float requestsPerSecond) {
        Objects.requireNonNull(taskId, "taskId cannot be null");
        if (requestsPerSecond <= 0) {
            throw new IllegalArgumentException("requestsPerSecond needs to be a positive value but was [" + requestsPerSecond + "]");
        }
        this.taskId = taskId;
        this.requestsPerSecond = requestsPerSecond;
    }

    /**
     * @return the task Id
     */
    public TaskId getTaskId() {
        return taskId;
    }

    /**
     * @return the requests per second value
     */
    public float getRequestsPerSecond() {
        return requestsPerSecond;
    }

    @Override
    public String toString() {
        return "RethrottleRequest: taskID = " + taskId + "; requestsPerSecond = " + requestsPerSecond;
    }
}
@@ -0,0 +1,160 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Objects;

/**
 * Request to start a Datafeed
 */
public class StartDatafeedRequest extends ActionRequest implements ToXContentObject {

    public static final ParseField START = new ParseField("start");
    public static final ParseField END = new ParseField("end");
    public static final ParseField TIMEOUT = new ParseField("timeout");

    public static ConstructingObjectParser<StartDatafeedRequest, Void> PARSER =
            new ConstructingObjectParser<>("start_datafeed_request", a -> new StartDatafeedRequest((String)a[0]));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), DatafeedConfig.ID);
        PARSER.declareString(StartDatafeedRequest::setStart, START);
        PARSER.declareString(StartDatafeedRequest::setEnd, END);
        PARSER.declareString((params, val) ->
            params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT);
    }

    private final String datafeedId;
    private String start;
    private String end;
    private TimeValue timeout;

    /**
     * Create a new StartDatafeedRequest for the given DatafeedId
     *
     * @param datafeedId non-null existing Datafeed ID
     */
    public StartDatafeedRequest(String datafeedId) {
        this.datafeedId = Objects.requireNonNull(datafeedId, "[datafeed_id] must not be null");
    }

    public String getDatafeedId() {
        return datafeedId;
    }

    public String getStart() {
        return start;
    }

    /**
     * The time that the datafeed should begin. This value is inclusive.
     *
     * If you specify a start value that is earlier than the timestamp of the latest processed record,
     * the datafeed continues from 1 millisecond after the timestamp of the latest processed record.
     *
     * If you do not specify a start time and the datafeed is associated with a new job,
     * the analysis starts from the earliest time for which data is available.
     *
     * @param start String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO 8601 string
     */
    public void setStart(String start) {
        this.start = start;
    }

    public String getEnd() {
        return end;
    }

    /**
     * The time that the datafeed should end. This value is exclusive.
     * If you do not specify an end time, the datafeed runs continuously.
     *
     * @param end String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO 8601 string
     */
    public void setEnd(String end) {
        this.end = end;
    }

    public TimeValue getTimeout() {
        return timeout;
    }

    /**
     * Indicates how long to wait for the cluster to respond to the request.
     *
     * @param timeout TimeValue for how long to wait for a response from the cluster
     */
    public void setTimeout(TimeValue timeout) {
        this.timeout = timeout;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    @Override
    public int hashCode() {
        return Objects.hash(datafeedId, start, end, timeout);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }

        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }

        StartDatafeedRequest other = (StartDatafeedRequest) obj;
        return Objects.equals(datafeedId, other.datafeedId) &&
                Objects.equals(start, other.start) &&
                Objects.equals(end, other.end) &&
                Objects.equals(timeout, other.timeout);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(DatafeedConfig.ID.getPreferredName(), datafeedId);
        if (start != null) {
            builder.field(START.getPreferredName(), start);
        }
        if (end != null) {
            builder.field(END.getPreferredName(), end);
        }
        if (timeout != null) {
            builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep());
        }
        builder.endObject();
        return builder;
    }
}
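A brief, hypothetical sketch of building the request above; the datafeed id and timestamps are placeholders:

    StartDatafeedRequest request = new StartDatafeedRequest("datafeed-1");
    request.setStart("1535760000000");                     // epoch millis, inclusive
    request.setEnd("2018-09-30T00:00:00Z");                // exclusive; omit to run continuously
    request.setTimeout(TimeValue.timeValueSeconds(20));    // how long to wait for the cluster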
@@ -0,0 +1,93 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

/**
 * Response indicating if the Machine Learning Datafeed is now started or not
 */
public class StartDatafeedResponse extends ActionResponse implements ToXContentObject {

    private static final ParseField STARTED = new ParseField("started");

    public static final ConstructingObjectParser<StartDatafeedResponse, Void> PARSER =
            new ConstructingObjectParser<>(
                "start_datafeed_response",
                true,
                (a) -> new StartDatafeedResponse((Boolean)a[0]));

    static {
        PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), STARTED);
    }

    private final boolean started;

    public StartDatafeedResponse(boolean started) {
        this.started = started;
    }

    public static StartDatafeedResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }

    /**
     * Has the Datafeed started or not
     *
     * @return boolean value indicating the Datafeed started status
     */
    public boolean isStarted() {
        return started;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        StartDatafeedResponse that = (StartDatafeedResponse) other;
        return isStarted() == that.isStarted();
    }

    @Override
    public int hashCode() {
        return Objects.hash(isStarted());
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(STARTED.getPreferredName(), started);
        builder.endObject();
        return builder;
    }
}
@@ -0,0 +1,195 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.security.InvalidParameterException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * Request to stop Machine Learning Datafeeds
 */
public class StopDatafeedRequest extends ActionRequest implements ToXContentObject {

    public static final ParseField TIMEOUT = new ParseField("timeout");
    public static final ParseField FORCE = new ParseField("force");
    public static final ParseField ALLOW_NO_DATAFEEDS = new ParseField("allow_no_datafeeds");

    @SuppressWarnings("unchecked")
    public static final ConstructingObjectParser<StopDatafeedRequest, Void> PARSER = new ConstructingObjectParser<>(
        "stop_datafeed_request",
        a -> new StopDatafeedRequest((List<String>) a[0]));

    static {
        PARSER.declareField(ConstructingObjectParser.constructorArg(),
            p -> Arrays.asList(Strings.commaDelimitedListToStringArray(p.text())),
            DatafeedConfig.ID, ObjectParser.ValueType.STRING_ARRAY);
        PARSER.declareString((obj, val) -> obj.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT);
        PARSER.declareBoolean(StopDatafeedRequest::setForce, FORCE);
        PARSER.declareBoolean(StopDatafeedRequest::setAllowNoDatafeeds, ALLOW_NO_DATAFEEDS);
    }

    private static final String ALL_DATAFEEDS = "_all";

    private final List<String> datafeedIds;
    private TimeValue timeout;
    private Boolean force;
    private Boolean allowNoDatafeeds;

    /**
     * Explicitly stop all datafeeds
     *
     * @return a {@link StopDatafeedRequest} for all existing datafeeds
     */
    public static StopDatafeedRequest stopAllDatafeedsRequest() {
        return new StopDatafeedRequest(ALL_DATAFEEDS);
    }

    StopDatafeedRequest(List<String> datafeedIds) {
        if (datafeedIds.isEmpty()) {
            throw new InvalidParameterException("datafeedIds must not be empty");
        }
        if (datafeedIds.stream().anyMatch(Objects::isNull)) {
            throw new NullPointerException("datafeedIds must not contain null values");
        }
        this.datafeedIds = new ArrayList<>(datafeedIds);
    }

    /**
     * Stop the specified Datafeeds via their unique datafeedIds
     *
     * @param datafeedIds must be non-null and non-empty and each datafeedId must be non-null
     */
    public StopDatafeedRequest(String... datafeedIds) {
        this(Arrays.asList(datafeedIds));
    }

    /**
     * All the datafeedIds to be stopped
     */
    public List<String> getDatafeedIds() {
        return datafeedIds;
    }

    public TimeValue getTimeout() {
        return timeout;
    }

    /**
     * How long to wait for the stop request to complete before timing out.
     *
     * @param timeout Default value: 30 minutes
     */
    public void setTimeout(TimeValue timeout) {
        this.timeout = timeout;
    }

    public Boolean isForce() {
        return force;
    }

    /**
     * Should the stopping be forced.
     *
     * Use to forcefully stop a datafeed
     *
     * @param force When {@code true} forcefully stop the datafeed. Defaults to {@code false}
     */
    public void setForce(boolean force) {
        this.force = force;
    }

    public Boolean isAllowNoDatafeeds() {
        return this.allowNoDatafeeds;
    }

    /**
     * Whether to ignore if a wildcard expression matches no datafeeds.
     *
     * This includes the {@code _all} string.
     *
     * @param allowNoDatafeeds When {@code true} ignore if wildcard or {@code _all} matches no datafeeds. Defaults to {@code true}
     */
    public void setAllowNoDatafeeds(boolean allowNoDatafeeds) {
        this.allowNoDatafeeds = allowNoDatafeeds;
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    @Override
    public int hashCode() {
        return Objects.hash(datafeedIds, timeout, force, allowNoDatafeeds);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        StopDatafeedRequest that = (StopDatafeedRequest) other;
        return Objects.equals(datafeedIds, that.datafeedIds) &&
            Objects.equals(timeout, that.timeout) &&
            Objects.equals(force, that.force) &&
            Objects.equals(allowNoDatafeeds, that.allowNoDatafeeds);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(DatafeedConfig.ID.getPreferredName(), Strings.collectionToCommaDelimitedString(datafeedIds));
        if (timeout != null) {
            builder.field(TIMEOUT.getPreferredName(), timeout.getStringRep());
        }
        if (force != null) {
            builder.field(FORCE.getPreferredName(), force);
        }
        if (allowNoDatafeeds != null) {
            builder.field(ALLOW_NO_DATAFEEDS.getPreferredName(), allowNoDatafeeds);
        }
        builder.endObject();
        return builder;
    }

    @Override
    public String toString() {
        return Strings.toString(this);
    }
}
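A brief, hypothetical sketch of building the stop request above; the datafeed ids are placeholders:

    StopDatafeedRequest stopTwo = new StopDatafeedRequest("datafeed-1", "datafeed-2");
    stopTwo.setTimeout(TimeValue.timeValueMinutes(5));    // wait up to 5 minutes for the stop to complete
    stopTwo.setAllowNoDatafeeds(true);                    // don't fail if a wildcard matches nothing

    // Or stop every datafeed in the cluster via the "_all" helper.
    StopDatafeedRequest stopAll = StopDatafeedRequest.stopAllDatafeedsRequest();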
@@ -0,0 +1,93 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client.ml;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Objects;

/**
 * Response indicating if the Machine Learning Datafeed is now stopped or not
 */
public class StopDatafeedResponse extends ActionResponse implements ToXContentObject {

    private static final ParseField STOPPED = new ParseField("stopped");

    public static final ConstructingObjectParser<StopDatafeedResponse, Void> PARSER =
            new ConstructingObjectParser<>(
                "stop_datafeed_response",
                true,
                (a) -> new StopDatafeedResponse((Boolean)a[0]));

    static {
        PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), STOPPED);
    }

    private final boolean stopped;

    public StopDatafeedResponse(boolean stopped) {
        this.stopped = stopped;
    }

    public static StopDatafeedResponse fromXContent(XContentParser parser) throws IOException {
        return PARSER.parse(parser, null);
    }

    /**
     * Has the Datafeed stopped or not
     *
     * @return boolean value indicating the Datafeed stopped status
     */
    public boolean isStopped() {
        return stopped;
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

        StopDatafeedResponse that = (StopDatafeedResponse) other;
        return isStopped() == that.isStopped();
    }

    @Override
    public int hashCode() {
        return Objects.hash(isStopped());
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(STOPPED.getPreferredName(), stopped);
        builder.endObject();
        return builder;
    }
}
@ -0,0 +1,341 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Locale;
|
||||
import java.util.Objects;
|
||||
|
||||
public class ActionStatus {
|
||||
|
||||
private final AckStatus ackStatus;
|
||||
@Nullable private final Execution lastExecution;
|
||||
@Nullable private final Execution lastSuccessfulExecution;
|
||||
@Nullable private final Throttle lastThrottle;
|
||||
|
||||
public ActionStatus(AckStatus ackStatus,
|
||||
@Nullable Execution lastExecution,
|
||||
@Nullable Execution lastSuccessfulExecution,
|
||||
@Nullable Throttle lastThrottle) {
|
||||
this.ackStatus = ackStatus;
|
||||
this.lastExecution = lastExecution;
|
||||
this.lastSuccessfulExecution = lastSuccessfulExecution;
|
||||
this.lastThrottle = lastThrottle;
|
||||
}
|
||||
|
||||
public AckStatus ackStatus() {
|
||||
return ackStatus;
|
||||
}
|
||||
|
||||
public Execution lastExecution() {
|
||||
return lastExecution;
|
||||
}
|
||||
|
||||
public Execution lastSuccessfulExecution() {
|
||||
return lastSuccessfulExecution;
|
||||
}
|
||||
|
||||
public Throttle lastThrottle() {
|
||||
return lastThrottle;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
ActionStatus that = (ActionStatus) o;
|
||||
|
||||
return Objects.equals(ackStatus, that.ackStatus) &&
|
||||
Objects.equals(lastExecution, that.lastExecution) &&
|
||||
Objects.equals(lastSuccessfulExecution, that.lastSuccessfulExecution) &&
|
||||
Objects.equals(lastThrottle, that.lastThrottle);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(ackStatus, lastExecution, lastSuccessfulExecution, lastThrottle);
|
||||
}
|
||||
|
||||
public static ActionStatus parse(String actionId, XContentParser parser) throws IOException {
|
||||
AckStatus ackStatus = null;
|
||||
Execution lastExecution = null;
|
||||
Execution lastSuccessfulExecution = null;
|
||||
Throttle lastThrottle = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.ACK_STATUS.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
ackStatus = AckStatus.parse(actionId, parser);
|
||||
} else if (Field.LAST_EXECUTION.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
lastExecution = Execution.parse(actionId, parser);
|
||||
} else if (Field.LAST_SUCCESSFUL_EXECUTION.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
lastSuccessfulExecution = Execution.parse(actionId, parser);
|
||||
} else if (Field.LAST_THROTTLE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
lastThrottle = Throttle.parse(actionId, parser);
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
if (ackStatus == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}]",
|
||||
actionId, Field.ACK_STATUS.getPreferredName());
|
||||
}
|
||||
return new ActionStatus(ackStatus, lastExecution, lastSuccessfulExecution, lastThrottle);
|
||||
}
|
||||
|
||||
public static class AckStatus {
|
||||
|
||||
public enum State {
|
||||
AWAITS_SUCCESSFUL_EXECUTION,
|
||||
ACKABLE,
|
||||
ACKED;
|
||||
}
|
||||
|
||||
private final DateTime timestamp;
|
||||
private final State state;
|
||||
|
||||
public AckStatus(DateTime timestamp, State state) {
|
||||
this.timestamp = timestamp.toDateTime(DateTimeZone.UTC);
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
public DateTime timestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public State state() {
|
||||
return state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
AckStatus ackStatus = (AckStatus) o;
|
||||
|
||||
return Objects.equals(timestamp, ackStatus.timestamp) && Objects.equals(state, ackStatus.state);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(timestamp, state);
|
||||
}
|
||||
|
||||
public static AckStatus parse(String actionId, XContentParser parser) throws IOException {
|
||||
DateTime timestamp = null;
|
||||
State state = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
timestamp = WatchStatusDateParser.parseDate(parser.text());
|
||||
} else if (Field.ACK_STATUS_STATE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
state = State.valueOf(parser.text().toUpperCase(Locale.ROOT));
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
if (timestamp == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.ACK_STATUS.getPreferredName(), Field.TIMESTAMP.getPreferredName());
|
||||
}
|
||||
if (state == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.ACK_STATUS.getPreferredName(), Field.ACK_STATUS_STATE.getPreferredName());
|
||||
}
|
||||
return new AckStatus(timestamp, state);
|
||||
}
|
||||
}
|
||||
|
||||
public static class Execution {
|
||||
|
||||
public static Execution successful(DateTime timestamp) {
|
||||
return new Execution(timestamp, true, null);
|
||||
}
|
||||
|
||||
public static Execution failure(DateTime timestamp, String reason) {
|
||||
return new Execution(timestamp, false, reason);
|
||||
}
|
||||
|
||||
private final DateTime timestamp;
|
||||
private final boolean successful;
|
||||
private final String reason;
|
||||
|
||||
private Execution(DateTime timestamp, boolean successful, String reason) {
|
||||
this.timestamp = timestamp.toDateTime(DateTimeZone.UTC);
|
||||
this.successful = successful;
|
||||
this.reason = reason;
|
||||
}
|
||||
|
||||
public DateTime timestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public boolean successful() {
|
||||
return successful;
|
||||
}
|
||||
|
||||
public String reason() {
|
||||
return reason;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
Execution execution = (Execution) o;
|
||||
|
||||
return Objects.equals(successful, execution.successful) &&
|
||||
Objects.equals(timestamp, execution.timestamp) &&
|
||||
Objects.equals(reason, execution.reason);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(timestamp, successful, reason);
|
||||
}
|
||||
|
||||
public static Execution parse(String actionId, XContentParser parser) throws IOException {
|
||||
DateTime timestamp = null;
|
||||
Boolean successful = null;
|
||||
String reason = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
timestamp = WatchStatusDateParser.parseDate(parser.text());
|
||||
} else if (Field.EXECUTION_SUCCESSFUL.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
successful = parser.booleanValue();
|
||||
} else if (Field.REASON.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
reason = parser.text();
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
if (timestamp == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.LAST_EXECUTION.getPreferredName(), Field.TIMESTAMP.getPreferredName());
|
||||
}
|
||||
if (successful == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.LAST_EXECUTION.getPreferredName(), Field.EXECUTION_SUCCESSFUL.getPreferredName());
|
||||
}
|
||||
if (successful) {
|
||||
return successful(timestamp);
|
||||
}
|
||||
if (reason == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field for unsuccessful" +
|
||||
" execution [{}.{}]", actionId, Field.LAST_EXECUTION.getPreferredName(), Field.REASON.getPreferredName());
|
||||
}
|
||||
return failure(timestamp, reason);
|
||||
}
|
||||
}
|
||||
|
||||
public static class Throttle {
|
||||
|
||||
private final DateTime timestamp;
|
||||
private final String reason;
|
||||
|
||||
public Throttle(DateTime timestamp, String reason) {
|
||||
this.timestamp = timestamp.toDateTime(DateTimeZone.UTC);
|
||||
this.reason = reason;
|
||||
}
|
||||
|
||||
public DateTime timestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public String reason() {
|
||||
return reason;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
Throttle throttle = (Throttle) o;
|
||||
return Objects.equals(timestamp, throttle.timestamp) && Objects.equals(reason, throttle.reason);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(timestamp, reason);
|
||||
}
|
||||
|
||||
public static Throttle parse(String actionId, XContentParser parser) throws IOException {
|
||||
DateTime timestamp = null;
|
||||
String reason = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
timestamp = WatchStatusDateParser.parseDate(parser.text());
|
||||
} else if (Field.REASON.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
reason = parser.text();
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
if (timestamp == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.LAST_THROTTLE.getPreferredName(), Field.TIMESTAMP.getPreferredName());
|
||||
}
|
||||
if (reason == null) {
|
||||
throw new ElasticsearchParseException("could not parse action status for [{}]. missing required field [{}.{}]",
|
||||
actionId, Field.LAST_THROTTLE.getPreferredName(), Field.REASON.getPreferredName());
|
||||
}
|
||||
return new Throttle(timestamp, reason);
|
||||
}
}

private interface Field {
ParseField ACK_STATUS = new ParseField("ack");
ParseField ACK_STATUS_STATE = new ParseField("state");
ParseField LAST_EXECUTION = new ParseField("last_execution");
ParseField LAST_SUCCESSFUL_EXECUTION = new ParseField("last_successful_execution");
ParseField EXECUTION_SUCCESSFUL = new ParseField("successful");
ParseField LAST_THROTTLE = new ParseField("last_throttle");
ParseField TIMESTAMP = new ParseField("timestamp");
ParseField REASON = new ParseField("reason");
}
}
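The ActionStatus.Throttle class above is a plain value holder, so here is a short, hypothetical usage sketch (not part of this commit) that assumes only the constructor and accessors shown in the diff:

// Hypothetical usage sketch; relies only on the public API shown above.
DateTime when = new DateTime(2018, 9, 26, 12, 0, DateTimeZone.UTC);
ActionStatus.Throttle first = new ActionStatus.Throttle(when, "throttled due to ack");
ActionStatus.Throttle second = new ActionStatus.Throttle(when, "throttled due to ack");
// The constructor normalizes the timestamp to UTC, so equal inputs compare equal.
assert first.equals(second) && first.hashCode() == second.hashCode();
assert first.timestamp().getZone().equals(DateTimeZone.UTC);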
@ -0,0 +1,68 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/

package org.elasticsearch.client.watcher;

import java.util.Locale;

public enum ExecutionState {

// the condition of the watch was not met
EXECUTION_NOT_NEEDED,

// Execution has been throttled due to time-based throttling - this might only affect a single action though
THROTTLED,

// Execution has been throttled due to ack-based throttling/muting of an action - this might only affect a single action though
ACKNOWLEDGED,

// regular execution
EXECUTED,

// an error in the condition or the execution of the input
FAILED,

// a rejection due to a filled up threadpool
THREADPOOL_REJECTION,

// the execution was scheduled, but in between the watch was deleted
NOT_EXECUTED_WATCH_MISSING,

// even though the execution was scheduled, it was not executed, because the watch was already queued in the thread pool
NOT_EXECUTED_ALREADY_QUEUED,

// this can happen when a watch was executed, but not completely finished (the triggered watch entry was not deleted), and then
// watcher is restarted (manually or due to host switch) - the triggered watch will be executed but the history entry already
// exists
EXECUTED_MULTIPLE_TIMES;

public String id() {
return name().toLowerCase(Locale.ROOT);
}

public static ExecutionState resolve(String id) {
return valueOf(id.toUpperCase(Locale.ROOT));
}

@Override
public String toString() {
return id();
}

}
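Because id() and resolve() are just locale-safe case conversions of the enum name, the lowercase wire form round-trips losslessly; a minimal illustrative sketch (assumed usage, not part of this commit):

// Round-tripping an ExecutionState through its lowercase wire form.
ExecutionState state = ExecutionState.THREADPOOL_REJECTION;
String wireValue = state.id();                     // "threadpool_rejection"
assert ExecutionState.resolve(wireValue) == state;
assert state.toString().equals(wireValue);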
@ -0,0 +1,233 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.joda.time.DateTime;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
import static org.elasticsearch.client.watcher.WatchStatusDateParser.parseDate;
|
||||
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
|
||||
import static org.joda.time.DateTimeZone.UTC;
|
||||
|
||||
public class WatchStatus {
|
||||
|
||||
private final State state;
|
||||
|
||||
private final ExecutionState executionState;
|
||||
private final DateTime lastChecked;
|
||||
private final DateTime lastMetCondition;
|
||||
private final long version;
|
||||
private final Map<String, ActionStatus> actions;
|
||||
|
||||
public WatchStatus(long version,
|
||||
State state,
|
||||
ExecutionState executionState,
|
||||
DateTime lastChecked,
|
||||
DateTime lastMetCondition,
|
||||
Map<String, ActionStatus> actions) {
|
||||
this.version = version;
|
||||
this.lastChecked = lastChecked;
|
||||
this.lastMetCondition = lastMetCondition;
|
||||
this.actions = actions;
|
||||
this.state = state;
|
||||
this.executionState = executionState;
|
||||
}
|
||||
|
||||
public State state() {
|
||||
return state;
|
||||
}
|
||||
|
||||
public boolean checked() {
|
||||
return lastChecked != null;
|
||||
}
|
||||
|
||||
public DateTime lastChecked() {
|
||||
return lastChecked;
|
||||
}
|
||||
|
||||
public DateTime lastMetCondition() {
|
||||
return lastMetCondition;
|
||||
}
|
||||
|
||||
public ActionStatus actionStatus(String actionId) {
|
||||
return actions.get(actionId);
|
||||
}
|
||||
|
||||
public long version() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public ExecutionState getExecutionState() {
|
||||
return executionState;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
|
||||
WatchStatus that = (WatchStatus) o;
|
||||
|
||||
return Objects.equals(lastChecked, that.lastChecked) &&
|
||||
Objects.equals(lastMetCondition, that.lastMetCondition) &&
|
||||
Objects.equals(version, that.version) &&
|
||||
Objects.equals(executionState, that.executionState) &&
|
||||
Objects.equals(actions, that.actions);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(lastChecked, lastMetCondition, actions, version, executionState);
|
||||
}
|
||||
|
||||
public static WatchStatus parse(XContentParser parser) throws IOException {
|
||||
State state = null;
|
||||
ExecutionState executionState = null;
|
||||
DateTime lastChecked = null;
|
||||
DateTime lastMetCondition = null;
|
||||
Map<String, ActionStatus> actions = null;
|
||||
long version = -1;
|
||||
|
||||
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.STATE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
try {
|
||||
state = State.parse(parser);
|
||||
} catch (ElasticsearchParseException e) {
|
||||
throw new ElasticsearchParseException("could not parse watch status. failed to parse field [{}]",
|
||||
e, currentFieldName);
|
||||
}
|
||||
} else if (Field.VERSION.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token.isValue()) {
|
||||
version = parser.longValue();
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to hold a long " +
|
||||
"value, found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (Field.LAST_CHECKED.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token.isValue()) {
|
||||
lastChecked = parseDate(currentFieldName, parser);
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to hold a date " +
|
||||
"value, found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (Field.LAST_MET_CONDITION.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token.isValue()) {
|
||||
lastMetCondition = parseDate(currentFieldName, parser);
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to hold a date " +
|
||||
"value, found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (Field.EXECUTION_STATE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
if (token.isValue()) {
|
||||
executionState = ExecutionState.resolve(parser.text());
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to hold a string " +
|
||||
"value, found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (Field.ACTIONS.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
actions = new HashMap<>();
|
||||
if (token == XContentParser.Token.START_OBJECT) {
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else {
|
||||
ActionStatus actionStatus = ActionStatus.parse(currentFieldName, parser);
|
||||
actions.put(currentFieldName, actionStatus);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse watch status. expecting field [{}] to be an object, " +
|
||||
"found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
|
||||
actions = actions == null ? emptyMap() : unmodifiableMap(actions);
|
||||
return new WatchStatus(version, state, executionState, lastChecked, lastMetCondition, actions);
|
||||
}
|
||||
|
||||
public static class State {
|
||||
|
||||
private final boolean active;
|
||||
private final DateTime timestamp;
|
||||
|
||||
public State(boolean active, DateTime timestamp) {
|
||||
this.active = active;
|
||||
this.timestamp = timestamp;
|
||||
}
|
||||
|
||||
public boolean isActive() {
|
||||
return active;
|
||||
}
|
||||
|
||||
public DateTime getTimestamp() {
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
public static State parse(XContentParser parser) throws IOException {
|
||||
if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
|
||||
throw new ElasticsearchParseException("expected an object but found [{}] instead", parser.currentToken());
|
||||
}
|
||||
boolean active = true;
|
||||
DateTime timestamp = DateTime.now(UTC);
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (Field.ACTIVE.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
active = parser.booleanValue();
|
||||
} else if (Field.TIMESTAMP.match(currentFieldName, parser.getDeprecationHandler())) {
|
||||
timestamp = parseDate(currentFieldName, parser);
|
||||
}
|
||||
}
|
||||
return new State(active, timestamp);
|
||||
}
|
||||
}

public interface Field {
ParseField STATE = new ParseField("state");
ParseField ACTIVE = new ParseField("active");
ParseField TIMESTAMP = new ParseField("timestamp");
ParseField LAST_CHECKED = new ParseField("last_checked");
ParseField LAST_MET_CONDITION = new ParseField("last_met_condition");
ParseField ACTIONS = new ParseField("actions");
ParseField VERSION = new ParseField("version");
ParseField EXECUTION_STATE = new ParseField("execution_state");
}
}
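WatchStatus is normally produced by parse(), but its constructor and accessors are public, so tests could build one by hand; a minimal sketch assuming only the API shown in this file (Collections.emptyMap() is simply used as an empty action map, and the chosen values are illustrative):

// Minimal hand-built WatchStatus; illustrative only, not part of this commit.
WatchStatus.State state = new WatchStatus.State(true, DateTime.now(DateTimeZone.UTC));
WatchStatus status = new WatchStatus(1L, state, ExecutionState.EXECUTED, null, null, Collections.emptyMap());
assert status.state().isActive();
assert status.checked() == false;                  // lastChecked was left null
assert status.getExecutionState() == ExecutionState.EXECUTED;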
@ -0,0 +1,58 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.mapper.DateFieldMapper;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public final class WatchStatusDateParser {

private static final FormatDateTimeFormatter FORMATTER = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER;

private WatchStatusDateParser() {
// Prevent instantiation.
}

public static DateTime parseDate(String fieldName, XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token == XContentParser.Token.VALUE_NUMBER) {
return new DateTime(parser.longValue(), DateTimeZone.UTC);
}
if (token == XContentParser.Token.VALUE_STRING) {
DateTime dateTime = parseDate(parser.text());
return dateTime.toDateTime(DateTimeZone.UTC);
}
if (token == XContentParser.Token.VALUE_NULL) {
return null;
}
throw new ElasticsearchParseException("could not parse date/time. expected date field [{}] " +
"to be either a number or a string but found [{}] instead", fieldName, token);
}

public static DateTime parseDate(String text) {
return FORMATTER.parser().parseDateTime(text);
}
}
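parseDate(String) delegates to DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, which is expected to accept both ISO-8601 strings and epoch-millis strings; a hedged sketch of that assumption (the example dates are arbitrary and not taken from this commit):

// Both forms should resolve to the same instant if the default formatter behaves as assumed.
DateTime fromIso = WatchStatusDateParser.parseDate("2018-09-26T10:15:30.000Z");
DateTime fromMillis = WatchStatusDateParser.parseDate("1537956930000");
assert fromIso.getMillis() == fromMillis.getMillis();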
@ -21,8 +21,12 @@ package org.elasticsearch.client;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchStatusException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||
import org.elasticsearch.action.bulk.BulkItemResponse;
|
||||
import org.elasticsearch.action.bulk.BulkProcessor;
|
||||
@ -52,12 +56,15 @@ import org.elasticsearch.index.get.GetResult;
|
||||
import org.elasticsearch.index.query.IdsQueryBuilder;
|
||||
import org.elasticsearch.index.reindex.BulkByScrollResponse;
|
||||
import org.elasticsearch.index.reindex.DeleteByQueryRequest;
|
||||
import org.elasticsearch.index.reindex.ReindexAction;
|
||||
import org.elasticsearch.index.reindex.ReindexRequest;
|
||||
import org.elasticsearch.index.reindex.UpdateByQueryRequest;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptType;
|
||||
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
|
||||
import org.elasticsearch.tasks.RawTaskStatus;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
import org.joda.time.format.DateTimeFormat;
|
||||
@ -65,9 +72,15 @@ import org.joda.time.format.DateTimeFormat;
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.lessThan;
|
||||
|
||||
public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@ -631,7 +644,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest);
|
||||
}
|
||||
|
||||
public void testReindex() throws IOException {
|
||||
public void testReindex() throws Exception {
|
||||
final String sourceIndex = "source1";
|
||||
final String destinationIndex = "dest";
|
||||
{
|
||||
@ -642,15 +655,14 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
.build();
|
||||
createIndex(sourceIndex, settings);
|
||||
createIndex(destinationIndex, settings);
|
||||
BulkRequest bulkRequest = new BulkRequest()
|
||||
.add(new IndexRequest(sourceIndex, "type", "1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON))
|
||||
.add(new IndexRequest(sourceIndex, "type", "2").source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON))
|
||||
.setRefreshPolicy(RefreshPolicy.IMMEDIATE);
|
||||
assertEquals(
|
||||
RestStatus.OK,
|
||||
highLevelClient().bulk(
|
||||
new BulkRequest()
|
||||
.add(new IndexRequest(sourceIndex, "type", "1")
|
||||
.source(Collections.singletonMap("foo", "bar"), XContentType.JSON))
|
||||
.add(new IndexRequest(sourceIndex, "type", "2")
|
||||
.source(Collections.singletonMap("foo2", "bar2"), XContentType.JSON))
|
||||
.setRefreshPolicy(RefreshPolicy.IMMEDIATE),
|
||||
bulkRequest,
|
||||
RequestOptions.DEFAULT
|
||||
).status()
|
||||
);
|
||||
@ -692,6 +704,72 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
|
||||
assertEquals(0, bulkResponse.getBulkFailures().size());
|
||||
assertEquals(0, bulkResponse.getSearchFailures().size());
|
||||
}
|
||||
{
// test reindex rethrottling
ReindexRequest reindexRequest = new ReindexRequest();
reindexRequest.setSourceIndices(sourceIndex);
reindexRequest.setDestIndex(destinationIndex);

// the following settings are supposed to halt reindexing after the first document
reindexRequest.setSourceBatchSize(1);
reindexRequest.setRequestsPerSecond(0.00001f);
final CountDownLatch reindexTaskFinished = new CountDownLatch(1);
|
||||
highLevelClient().reindexAsync(reindexRequest, RequestOptions.DEFAULT, new ActionListener<BulkByScrollResponse>() {
|
||||
|
||||
@Override
|
||||
public void onResponse(BulkByScrollResponse response) {
|
||||
reindexTaskFinished.countDown();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
fail(e.toString());
|
||||
}
|
||||
});
|
||||
|
||||
TaskGroup taskGroupToRethrottle = findTaskToRethrottle();
|
||||
assertThat(taskGroupToRethrottle.getChildTasks(), empty());
|
||||
TaskId taskIdToRethrottle = taskGroupToRethrottle.getTaskInfo().getTaskId();
|
||||
|
||||
float requestsPerSecond = 1000f;
|
||||
ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond),
|
||||
highLevelClient()::reindexRethrottle, highLevelClient()::reindexRethrottleAsync);
|
||||
assertThat(response.getTasks(), hasSize(1));
|
||||
assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId());
|
||||
assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class));
|
||||
assertEquals(Float.toString(requestsPerSecond),
|
||||
((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString());
|
||||
reindexTaskFinished.await(2, TimeUnit.SECONDS);
|
||||
|
||||
// any rethrottling with the same taskId after the reindex has finished should result in a failure
|
||||
response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond),
|
||||
highLevelClient()::reindexRethrottle, highLevelClient()::reindexRethrottleAsync);
|
||||
assertTrue(response.getTasks().isEmpty());
|
||||
assertFalse(response.getNodeFailures().isEmpty());
|
||||
assertEquals(1, response.getNodeFailures().size());
|
||||
assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]",
|
||||
response.getNodeFailures().get(0).getCause().getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private TaskGroup findTaskToRethrottle() throws IOException {
|
||||
long start = System.nanoTime();
|
||||
ListTasksRequest request = new ListTasksRequest();
|
||||
request.setActions(ReindexAction.NAME);
|
||||
request.setDetailed(true);
|
||||
do {
|
||||
ListTasksResponse list = highLevelClient().tasks().list(request, RequestOptions.DEFAULT);
|
||||
list.rethrowFailures("Finding tasks to rethrottle");
|
||||
assertThat("tasks are left over from the last execution of this test",
|
||||
list.getTaskGroups(), hasSize(lessThan(2)));
|
||||
if (0 == list.getTaskGroups().size()) {
|
||||
// The parent task hasn't started yet
|
||||
continue;
|
||||
}
|
||||
return list.getTaskGroups().get(0);
|
||||
} while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10));
|
||||
throw new AssertionError("Couldn't find tasks to rethrottle. Here are the running tasks " +
|
||||
highLevelClient().tasks().list(request, RequestOptions.DEFAULT));
|
||||
}
|
||||
|
||||
public void testUpdateByQuery() throws IOException {
|
||||
|
@ -44,6 +44,9 @@ import org.elasticsearch.client.ml.PostDataRequest;
|
||||
import org.elasticsearch.client.ml.PutCalendarRequest;
|
||||
import org.elasticsearch.client.ml.PutDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.PutJobRequest;
|
||||
import org.elasticsearch.client.ml.StartDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.StartDatafeedRequestTests;
|
||||
import org.elasticsearch.client.ml.StopDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.UpdateJobRequest;
|
||||
import org.elasticsearch.client.ml.calendars.Calendar;
|
||||
import org.elasticsearch.client.ml.calendars.CalendarTests;
|
||||
@ -261,6 +264,35 @@ public class MLRequestConvertersTests extends ESTestCase {
|
||||
assertEquals(Boolean.toString(true), request.getParameters().get("force"));
|
||||
}
|
||||
|
||||
public void testStartDatafeed() throws Exception {
|
||||
String datafeedId = DatafeedConfigTests.randomValidDatafeedId();
|
||||
StartDatafeedRequest datafeedRequest = StartDatafeedRequestTests.createRandomInstance(datafeedId);
|
||||
|
||||
Request request = MLRequestConverters.startDatafeed(datafeedRequest);
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_xpack/ml/datafeeds/" + datafeedId + "/_start", request.getEndpoint());
|
||||
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
|
||||
StartDatafeedRequest parsedDatafeedRequest = StartDatafeedRequest.PARSER.apply(parser, null);
|
||||
assertThat(parsedDatafeedRequest, equalTo(datafeedRequest));
|
||||
}
|
||||
}
|
||||
|
||||
public void testStopDatafeed() throws Exception {
|
||||
StopDatafeedRequest datafeedRequest = new StopDatafeedRequest("datafeed_1", "datafeed_2");
|
||||
datafeedRequest.setForce(true);
|
||||
datafeedRequest.setTimeout(TimeValue.timeValueMinutes(10));
|
||||
datafeedRequest.setAllowNoDatafeeds(true);
|
||||
Request request = MLRequestConverters.stopDatafeed(datafeedRequest);
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals("/_xpack/ml/datafeeds/" +
|
||||
Strings.collectionToCommaDelimitedString(datafeedRequest.getDatafeedIds()) +
|
||||
"/_stop", request.getEndpoint());
|
||||
try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) {
|
||||
StopDatafeedRequest parsedDatafeedRequest = StopDatafeedRequest.PARSER.apply(parser, null);
|
||||
assertThat(parsedDatafeedRequest, equalTo(datafeedRequest));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeleteForecast() {
|
||||
String jobId = randomAlphaOfLength(10);
|
||||
DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest(jobId);
|
||||
|
@ -19,9 +19,14 @@
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
|
||||
|
||||
import org.elasticsearch.ElasticsearchStatusException;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.get.GetRequest;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.ml.CloseJobRequest;
|
||||
import org.elasticsearch.client.ml.CloseJobResponse;
|
||||
@ -51,6 +56,10 @@ import org.elasticsearch.client.ml.PutDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.PutDatafeedResponse;
|
||||
import org.elasticsearch.client.ml.PutJobRequest;
|
||||
import org.elasticsearch.client.ml.PutJobResponse;
|
||||
import org.elasticsearch.client.ml.StartDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.StartDatafeedResponse;
|
||||
import org.elasticsearch.client.ml.StopDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.StopDatafeedResponse;
|
||||
import org.elasticsearch.client.ml.UpdateJobRequest;
|
||||
import org.elasticsearch.client.ml.calendars.Calendar;
|
||||
import org.elasticsearch.client.ml.calendars.CalendarTests;
|
||||
@ -63,6 +72,7 @@ import org.elasticsearch.client.ml.job.config.JobState;
|
||||
import org.elasticsearch.client.ml.job.config.JobUpdate;
|
||||
import org.elasticsearch.client.ml.job.stats.JobStats;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.junit.After;
|
||||
|
||||
@ -416,6 +426,146 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
|
||||
assertTrue(response.isAcknowledged());
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/33966")
|
||||
public void testStartDatafeed() throws Exception {
|
||||
String jobId = "test-start-datafeed";
|
||||
String indexName = "start_data_1";
|
||||
|
||||
// Set up the index and docs
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
|
||||
createIndexRequest.mapping("doc", "timestamp", "type=date", "total", "type=long");
|
||||
highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT);
|
||||
BulkRequest bulk = new BulkRequest();
|
||||
bulk.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
|
||||
long now = System.currentTimeMillis();
|
||||
long oneDayAgo = now - 86400000;
|
||||
int i = 0;
|
||||
long dayAgoCopy = oneDayAgo;
|
||||
while(dayAgoCopy < now) {
|
||||
IndexRequest doc = new IndexRequest();
|
||||
doc.index(indexName);
|
||||
doc.type("doc");
|
||||
doc.id("id" + i);
|
||||
doc.source("{\"total\":" +randomInt(1000) + ",\"timestamp\":"+ dayAgoCopy +"}", XContentType.JSON);
|
||||
bulk.add(doc);
|
||||
dayAgoCopy += 1000000;
|
||||
i++;
|
||||
}
|
||||
highLevelClient().bulk(bulk, RequestOptions.DEFAULT);
|
||||
final long totalDocCount = i;
|
||||
|
||||
// create the job and the datafeed
|
||||
Job job = buildJob(jobId);
|
||||
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
|
||||
machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT);
|
||||
|
||||
String datafeedId = jobId + "-feed";
|
||||
DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId, jobId)
|
||||
.setIndices(indexName)
|
||||
.setQueryDelay(TimeValue.timeValueSeconds(1))
|
||||
.setTypes(Arrays.asList("doc"))
|
||||
.setFrequency(TimeValue.timeValueSeconds(1)).build();
|
||||
machineLearningClient.putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);
|
||||
|
||||
|
||||
StartDatafeedRequest startDatafeedRequest = new StartDatafeedRequest(datafeedId);
|
||||
startDatafeedRequest.setStart(String.valueOf(oneDayAgo));
|
||||
// Should only process two documents
|
||||
startDatafeedRequest.setEnd(String.valueOf(oneDayAgo + 2000000));
|
||||
StartDatafeedResponse response = execute(startDatafeedRequest,
|
||||
machineLearningClient::startDatafeed,
|
||||
machineLearningClient::startDatafeedAsync);
|
||||
|
||||
assertTrue(response.isStarted());
|
||||
|
||||
assertBusy(() -> {
|
||||
JobStats stats = machineLearningClient.getJobStats(new GetJobStatsRequest(jobId), RequestOptions.DEFAULT).jobStats().get(0);
|
||||
assertEquals(2L, stats.getDataCounts().getInputRecordCount());
|
||||
assertEquals(JobState.CLOSED, stats.getState());
|
||||
}, 30, TimeUnit.SECONDS);
|
||||
|
||||
machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT);
|
||||
StartDatafeedRequest wholeDataFeed = new StartDatafeedRequest(datafeedId);
|
||||
// Process all documents and end the stream
|
||||
wholeDataFeed.setEnd(String.valueOf(now));
|
||||
StartDatafeedResponse wholeResponse = execute(wholeDataFeed,
|
||||
machineLearningClient::startDatafeed,
|
||||
machineLearningClient::startDatafeedAsync);
|
||||
assertTrue(wholeResponse.isStarted());
|
||||
|
||||
assertBusy(() -> {
|
||||
JobStats stats = machineLearningClient.getJobStats(new GetJobStatsRequest(jobId), RequestOptions.DEFAULT).jobStats().get(0);
|
||||
assertEquals(totalDocCount, stats.getDataCounts().getInputRecordCount());
|
||||
assertEquals(JobState.CLOSED, stats.getState());
|
||||
}, 30, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
public void testStopDatafeed() throws Exception {
|
||||
String jobId1 = "test-stop-datafeed1";
|
||||
String jobId2 = "test-stop-datafeed2";
|
||||
String jobId3 = "test-stop-datafeed3";
|
||||
String indexName = "stop_data_1";
|
||||
|
||||
// Set up the index
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
|
||||
createIndexRequest.mapping("doc", "timestamp", "type=date", "total", "type=long");
|
||||
highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT);
|
||||
|
||||
// create the job and the datafeed
|
||||
Job job1 = buildJob(jobId1);
|
||||
putJob(job1);
|
||||
openJob(job1);
|
||||
|
||||
Job job2 = buildJob(jobId2);
|
||||
putJob(job2);
|
||||
openJob(job2);
|
||||
|
||||
Job job3 = buildJob(jobId3);
|
||||
putJob(job3);
|
||||
openJob(job3);
|
||||
|
||||
String datafeedId1 = createAndPutDatafeed(jobId1, indexName);
|
||||
String datafeedId2 = createAndPutDatafeed(jobId2, indexName);
|
||||
String datafeedId3 = createAndPutDatafeed(jobId3, indexName);
|
||||
|
||||
MachineLearningClient machineLearningClient = highLevelClient().machineLearning();
|
||||
|
||||
machineLearningClient.startDatafeed(new StartDatafeedRequest(datafeedId1), RequestOptions.DEFAULT);
|
||||
machineLearningClient.startDatafeed(new StartDatafeedRequest(datafeedId2), RequestOptions.DEFAULT);
|
||||
machineLearningClient.startDatafeed(new StartDatafeedRequest(datafeedId3), RequestOptions.DEFAULT);
|
||||
|
||||
{
|
||||
StopDatafeedRequest request = new StopDatafeedRequest(datafeedId1);
|
||||
request.setAllowNoDatafeeds(false);
|
||||
StopDatafeedResponse stopDatafeedResponse = execute(request,
|
||||
machineLearningClient::stopDatafeed,
|
||||
machineLearningClient::stopDatafeedAsync);
|
||||
assertTrue(stopDatafeedResponse.isStopped());
|
||||
}
|
||||
{
|
||||
StopDatafeedRequest request = new StopDatafeedRequest(datafeedId2, datafeedId3);
|
||||
request.setAllowNoDatafeeds(false);
|
||||
StopDatafeedResponse stopDatafeedResponse = execute(request,
|
||||
machineLearningClient::stopDatafeed,
|
||||
machineLearningClient::stopDatafeedAsync);
|
||||
assertTrue(stopDatafeedResponse.isStopped());
|
||||
}
|
||||
{
|
||||
StopDatafeedResponse stopDatafeedResponse = execute(new StopDatafeedRequest("datafeed_that_doesnot_exist*"),
|
||||
machineLearningClient::stopDatafeed,
|
||||
machineLearningClient::stopDatafeedAsync);
|
||||
assertTrue(stopDatafeedResponse.isStopped());
|
||||
}
|
||||
{
|
||||
StopDatafeedRequest request = new StopDatafeedRequest("datafeed_that_doesnot_exist*");
|
||||
request.setAllowNoDatafeeds(false);
|
||||
ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class,
|
||||
() -> execute(request, machineLearningClient::stopDatafeed, machineLearningClient::stopDatafeedAsync));
|
||||
assertThat(exception.status().getStatus(), equalTo(404));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeleteForecast() throws Exception {
|
||||
String jobId = "test-delete-forecast";
|
||||
|
||||
@ -561,4 +711,23 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
private void putJob(Job job) throws IOException {
|
||||
highLevelClient().machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
}
|
||||
|
||||
private void openJob(Job job) throws IOException {
|
||||
highLevelClient().machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);
|
||||
}
|
||||
|
||||
private String createAndPutDatafeed(String jobId, String indexName) throws IOException {
|
||||
String datafeedId = jobId + "-feed";
|
||||
DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId, jobId)
|
||||
.setIndices(indexName)
|
||||
.setQueryDelay(TimeValue.timeValueSeconds(1))
|
||||
.setTypes(Arrays.asList("doc"))
|
||||
.setFrequency(TimeValue.timeValueSeconds(1)).build();
|
||||
highLevelClient().machineLearning().putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);
|
||||
return datafeedId;
|
||||
}
|
||||
}
|
||||
|
@ -104,6 +104,7 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
|
||||
import org.elasticsearch.search.rescore.QueryRescorerBuilder;
|
||||
import org.elasticsearch.search.suggest.SuggestBuilder;
|
||||
import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.RandomObjects;
|
||||
|
||||
@ -327,6 +328,13 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
if (randomBoolean()) {
|
||||
reindexRequest.setDestPipeline("my_pipeline");
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
float requestsPerSecond = (float) randomDoubleBetween(0.0, 10.0, false);
|
||||
expectedParams.put(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, Float.toString(requestsPerSecond));
|
||||
reindexRequest.setRequestsPerSecond(requestsPerSecond);
|
||||
} else {
|
||||
expectedParams.put(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, "-1");
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
reindexRequest.setDestRouting("=cat");
|
||||
}
|
||||
@ -369,6 +377,13 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
updateByQueryRequest.setPipeline("my_pipeline");
|
||||
expectedParams.put("pipeline", "my_pipeline");
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
float requestsPerSecond = (float) randomDoubleBetween(0.0, 10.0, false);
|
||||
expectedParams.put("requests_per_second", Float.toString(requestsPerSecond));
|
||||
updateByQueryRequest.setRequestsPerSecond(requestsPerSecond);
|
||||
} else {
|
||||
expectedParams.put("requests_per_second", "-1");
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
updateByQueryRequest.setRouting("=cat");
|
||||
expectedParams.put("routing", "=cat");
|
||||
@ -440,6 +455,13 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
if (randomBoolean()) {
|
||||
deleteByQueryRequest.setQuery(new TermQueryBuilder("foo", "fooval"));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
float requestsPerSecond = (float) randomDoubleBetween(0.0, 10.0, false);
|
||||
expectedParams.put("requests_per_second", Float.toString(requestsPerSecond));
|
||||
deleteByQueryRequest.setRequestsPerSecond(requestsPerSecond);
|
||||
} else {
|
||||
expectedParams.put("requests_per_second", "-1");
|
||||
}
|
||||
setRandomIndicesOptions(deleteByQueryRequest::setIndicesOptions, deleteByQueryRequest::indicesOptions, expectedParams);
|
||||
setRandomTimeout(deleteByQueryRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
|
||||
Request request = RequestConverters.deleteByQuery(deleteByQueryRequest);
|
||||
@ -454,6 +476,34 @@ public class RequestConvertersTests extends ESTestCase {
|
||||
assertToXContentBody(deleteByQueryRequest, request.getEntity());
|
||||
}
|
||||
|
||||
public void testRethrottle() throws IOException {
|
||||
TaskId taskId = new TaskId(randomAlphaOfLength(10), randomIntBetween(1, 100));
|
||||
RethrottleRequest rethrottleRequest;
|
||||
Float requestsPerSecond;
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
if (frequently()) {
|
||||
requestsPerSecond = (float) randomDoubleBetween(0.0, 100.0, true);
|
||||
rethrottleRequest = new RethrottleRequest(taskId, requestsPerSecond);
|
||||
expectedParams.put(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, Float.toString(requestsPerSecond));
|
||||
} else {
|
||||
rethrottleRequest = new RethrottleRequest(taskId);
|
||||
expectedParams.put(RethrottleRequest.REQUEST_PER_SECOND_PARAMETER, "-1");
|
||||
}
|
||||
expectedParams.put("group_by", "none");
|
||||
Request request = RequestConverters.rethrottle(rethrottleRequest);
|
||||
assertEquals("/_reindex/" + taskId + "/_rethrottle", request.getEndpoint());
|
||||
assertEquals(HttpPost.METHOD_NAME, request.getMethod());
|
||||
assertEquals(expectedParams, request.getParameters());
|
||||
assertNull(request.getEntity());
|
||||
|
||||
// test illegal RethrottleRequest values
|
||||
Exception e = expectThrows(NullPointerException.class, () -> new RethrottleRequest(null, 1.0f));
|
||||
assertEquals("taskId cannot be null", e.getMessage());
|
||||
|
||||
e = expectThrows(IllegalArgumentException.class, () -> new RethrottleRequest(new TaskId("taskId", 1), -5.0f));
|
||||
assertEquals("requestsPerSecond needs to be positive value but was [-5.0]", e.getMessage());
|
||||
}
|
||||
|
||||
public void testIndex() throws IOException {
|
||||
String index = randomAlphaOfLengthBetween(3, 10);
|
||||
String type = randomAlphaOfLengthBetween(3, 10);
|
||||
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonParseException;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.HttpHost;
|
||||
import org.apache.http.HttpResponse;
|
||||
@ -672,7 +673,6 @@ public class RestHighLevelClientTests extends ESTestCase {
|
||||
"indices.get_upgrade",
|
||||
"indices.put_alias",
|
||||
"mtermvectors",
|
||||
"reindex_rethrottle",
|
||||
"render_search_template",
|
||||
"scripts_painless_execute",
|
||||
"tasks.get",
|
||||
|
@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
|
||||
import org.elasticsearch.action.bulk.BackoffPolicy;
|
||||
import org.elasticsearch.action.bulk.BulkItemResponse;
|
||||
import org.elasticsearch.action.bulk.BulkProcessor;
|
||||
@ -50,6 +51,7 @@ import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.RethrottleRequest;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
@ -75,6 +77,7 @@ import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptType;
|
||||
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
|
||||
import org.elasticsearch.search.sort.SortOrder;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Date;
|
||||
@ -902,6 +905,48 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testReindexRethrottle() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
TaskId taskId = new TaskId("oTUltX4IQMOUUVeiohTt8A:124");
|
||||
{
|
||||
// tag::rethrottle-disable-request
|
||||
RethrottleRequest rethrottleRequest = new RethrottleRequest(taskId); // <1>
|
||||
client.reindexRethrottle(rethrottleRequest, RequestOptions.DEFAULT);
|
||||
// end::rethrottle-disable-request
|
||||
}
|
||||
|
||||
{
|
||||
// tag::rethrottle-request
|
||||
RethrottleRequest rethrottleRequest = new RethrottleRequest(taskId, 100.0f); // <1>
|
||||
client.reindexRethrottle(rethrottleRequest, RequestOptions.DEFAULT);
|
||||
// end::rethrottle-request
|
||||
}
|
||||
|
||||
// tag::rethrottle-request-async
|
||||
ActionListener<ListTasksResponse> listener = new ActionListener<ListTasksResponse>() {
|
||||
@Override
|
||||
public void onResponse(ListTasksResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::rethrottle-request-async
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
RethrottleRequest rethrottleRequest = new RethrottleRequest(taskId);
|
||||
// tag::rethrottle-execute-async
|
||||
client.reindexRethrottleAsync(rethrottleRequest, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::rethrottle-execute-async
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testUpdateByQuery() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
|
@ -20,6 +20,7 @@ package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.get.GetRequest;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
@ -70,6 +71,10 @@ import org.elasticsearch.client.ml.PutDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.PutDatafeedResponse;
|
||||
import org.elasticsearch.client.ml.PutJobRequest;
|
||||
import org.elasticsearch.client.ml.PutJobResponse;
|
||||
import org.elasticsearch.client.ml.StartDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.StartDatafeedResponse;
|
||||
import org.elasticsearch.client.ml.StopDatafeedRequest;
|
||||
import org.elasticsearch.client.ml.StopDatafeedResponse;
|
||||
import org.elasticsearch.client.ml.UpdateJobRequest;
|
||||
import org.elasticsearch.client.ml.calendars.Calendar;
|
||||
import org.elasticsearch.client.ml.datafeed.ChunkingConfig;
|
||||
@ -703,6 +708,120 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
public void testStartDatafeed() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
Job job = MachineLearningIT.buildJob("start-datafeed-job");
|
||||
client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT);
|
||||
String datafeedId = job.getId() + "-feed";
|
||||
String indexName = "start_data_2";
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName);
|
||||
createIndexRequest.mapping("doc", "timestamp", "type=date", "total", "type=long");
|
||||
highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT);
|
||||
DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId, job.getId())
|
||||
.setTypes(Arrays.asList("doc"))
|
||||
.setIndices(indexName)
|
||||
.build();
|
||||
client.machineLearning().putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT);
|
||||
client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT);
|
||||
{
|
||||
//tag::x-pack-ml-start-datafeed-request
|
||||
StartDatafeedRequest request = new StartDatafeedRequest(datafeedId); // <1>
|
||||
//end::x-pack-ml-start-datafeed-request
|
||||
|
||||
//tag::x-pack-ml-start-datafeed-request-options
|
||||
request.setEnd("2018-08-21T00:00:00Z"); // <1>
|
||||
request.setStart("2018-08-20T00:00:00Z"); // <2>
|
||||
request.setTimeout(TimeValue.timeValueMinutes(10)); // <3>
|
||||
//end::x-pack-ml-start-datafeed-request-options
|
||||
|
||||
//tag::x-pack-ml-start-datafeed-execute
|
||||
StartDatafeedResponse response = client.machineLearning().startDatafeed(request, RequestOptions.DEFAULT);
|
||||
boolean started = response.isStarted(); // <1>
|
||||
//end::x-pack-ml-start-datafeed-execute
|
||||
|
||||
assertTrue(started);
|
||||
}
|
||||
{
|
||||
StartDatafeedRequest request = new StartDatafeedRequest(datafeedId);
|
||||
|
||||
// tag::x-pack-ml-start-datafeed-listener
|
||||
ActionListener<StartDatafeedResponse> listener = new ActionListener<StartDatafeedResponse>() {
|
||||
@Override
|
||||
public void onResponse(StartDatafeedResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-ml-start-datafeed-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-ml-start-datafeed-execute-async
|
||||
client.machineLearning().startDatafeedAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-ml-start-datafeed-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testStopDatafeed() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
//tag::x-pack-ml-stop-datafeed-request
|
||||
StopDatafeedRequest request = new StopDatafeedRequest("datafeed_id1", "datafeed_id*"); // <1>
|
||||
//end::x-pack-ml-stop-datafeed-request
|
||||
request = StopDatafeedRequest.stopAllDatafeedsRequest();
|
||||
|
||||
//tag::x-pack-ml-stop-datafeed-request-options
|
||||
request.setAllowNoDatafeeds(true); // <1>
|
||||
request.setForce(true); // <2>
|
||||
request.setTimeout(TimeValue.timeValueMinutes(10)); // <3>
|
||||
//end::x-pack-ml-stop-datafeed-request-options
|
||||
|
||||
//tag::x-pack-ml-stop-datafeed-execute
|
||||
StopDatafeedResponse response = client.machineLearning().stopDatafeed(request, RequestOptions.DEFAULT);
|
||||
boolean stopped = response.isStopped(); // <1>
|
||||
//end::x-pack-ml-stop-datafeed-execute
|
||||
|
||||
assertTrue(stopped);
|
||||
}
|
||||
{
|
||||
StopDatafeedRequest request = StopDatafeedRequest.stopAllDatafeedsRequest();
|
||||
|
||||
// tag::x-pack-ml-stop-datafeed-listener
|
||||
ActionListener<StopDatafeedResponse> listener = new ActionListener<StopDatafeedResponse>() {
|
||||
@Override
|
||||
public void onResponse(StopDatafeedResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-ml-stop-datafeed-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-ml-stop-datafeed-execute-async
|
||||
client.machineLearning().stopDatafeedAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-ml-stop-datafeed-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetBuckets() throws IOException, InterruptedException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
|
@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.ml;
|
||||
|
||||
import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.test.AbstractXContentTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class StartDatafeedRequestTests extends AbstractXContentTestCase<StartDatafeedRequest> {
|
||||
|
||||
public static StartDatafeedRequest createRandomInstance(String datafeedId) {
|
||||
StartDatafeedRequest request = new StartDatafeedRequest(datafeedId);
|
||||
|
||||
if (randomBoolean()) {
|
||||
request.setStart(String.valueOf(randomLongBetween(1, 1000)));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.setEnd(String.valueOf(randomLongBetween(1, 1000)));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
request.setTimeout(TimeValue.timeValueMinutes(randomLongBetween(1, 1000)));
|
||||
}
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected StartDatafeedRequest createTestInstance() {
|
||||
String datafeedId = DatafeedConfigTests.randomValidDatafeedId();
|
||||
return createRandomInstance(datafeedId);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected StartDatafeedRequest doParseInstance(XContentParser parser) throws IOException {
|
||||
return StartDatafeedRequest.PARSER.parse(parser, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean supportsUnknownFields() {
|
||||
return false;
|
||||
}
|
||||
}
|
@ -0,0 +1,42 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.ml;
|
||||
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.test.AbstractXContentTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class StartDatafeedResponseTests extends AbstractXContentTestCase<StartDatafeedResponse> {
|
||||
|
||||
@Override
|
||||
protected StartDatafeedResponse createTestInstance() {
|
||||
return new StartDatafeedResponse(randomBoolean());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected StartDatafeedResponse doParseInstance(XContentParser parser) throws IOException {
|
||||
return StartDatafeedResponse.fromXContent(parser);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean supportsUnknownFields() {
|
||||
return true;
|
||||
}
|
||||
}
|
@ -0,0 +1,81 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.ml;
|
||||
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.test.AbstractXContentTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
public class StopDatafeedRequestTests extends AbstractXContentTestCase<StopDatafeedRequest> {
|
||||
|
||||
public void testCloseAllDatafeedsRequest() {
|
||||
StopDatafeedRequest request = StopDatafeedRequest.stopAllDatafeedsRequest();
|
||||
assertEquals(request.getDatafeedIds().size(), 1);
|
||||
assertEquals(request.getDatafeedIds().get(0), "_all");
|
||||
}
|
||||
|
||||
public void testWithNullDatafeedIds() {
|
||||
Exception exception = expectThrows(IllegalArgumentException.class, StopDatafeedRequest::new);
|
||||
assertEquals(exception.getMessage(), "datafeedIds must not be empty");
|
||||
|
||||
exception = expectThrows(NullPointerException.class, () -> new StopDatafeedRequest("datafeed1", null));
|
||||
assertEquals(exception.getMessage(), "datafeedIds must not contain null values");
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
protected StopDatafeedRequest createTestInstance() {
|
||||
int datafeedCount = randomIntBetween(1, 10);
|
||||
List<String> datafeedIds = new ArrayList<>(datafeedCount);
|
||||
|
||||
for (int i = 0; i < datafeedCount; i++) {
|
||||
datafeedIds.add(randomAlphaOfLength(10));
|
||||
}
|
||||
|
||||
StopDatafeedRequest request = new StopDatafeedRequest(datafeedIds.toArray(new String[0]));
|
||||
|
||||
if (randomBoolean()) {
|
||||
request.setAllowNoDatafeeds(randomBoolean());
|
||||
}
|
||||
|
||||
if (randomBoolean()) {
|
||||
request.setTimeout(TimeValue.timeValueMinutes(randomIntBetween(1, 10)));
|
||||
}
|
||||
|
||||
if (randomBoolean()) {
|
||||
request.setForce(randomBoolean());
|
||||
}
|
||||
|
||||
return request;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected StopDatafeedRequest doParseInstance(XContentParser parser) throws IOException {
|
||||
return StopDatafeedRequest.PARSER.parse(parser, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean supportsUnknownFields() {
|
||||
return false;
|
||||
}
|
||||
}
|
@ -0,0 +1,42 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.client.ml;
|
||||
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.test.AbstractXContentTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class StopDatafeedResponseTests extends AbstractXContentTestCase<StopDatafeedResponse> {
|
||||
|
||||
@Override
|
||||
protected StopDatafeedResponse createTestInstance() {
|
||||
return new StopDatafeedResponse(randomBoolean());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected StopDatafeedResponse doParseInstance(XContentParser parser) throws IOException {
|
||||
return StopDatafeedResponse.fromXContent(parser);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean supportsUnknownFields() {
|
||||
return true;
|
||||
}
|
||||
}
|
@ -0,0 +1,174 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.watcher;
|
||||
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.XContentTestUtils;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
public class WatchStatusTests extends ESTestCase {
|
||||
|
||||
public void testBasicParsing() throws IOException {
|
||||
int expectedVersion = randomIntBetween(0, 100);
|
||||
ExecutionState expectedExecutionState = randomFrom(ExecutionState.values());
|
||||
boolean expectedActive = randomBoolean();
|
||||
ActionStatus.AckStatus.State expectedAckState = randomFrom(ActionStatus.AckStatus.State.values());
|
||||
|
||||
XContentBuilder builder = createTestXContent(expectedVersion, expectedExecutionState,
|
||||
expectedActive, expectedAckState);
|
||||
BytesReference bytes = BytesReference.bytes(builder);
|
||||
|
||||
WatchStatus watchStatus = parse(builder.contentType(), bytes);
|
||||
|
||||
assertEquals(expectedVersion, watchStatus.version());
|
||||
assertEquals(expectedExecutionState, watchStatus.getExecutionState());
|
||||
|
||||
assertEquals(new DateTime(1432663467763L, DateTimeZone.UTC), watchStatus.lastChecked());
|
||||
assertEquals(DateTime.parse("2015-05-26T18:04:27.763Z"), watchStatus.lastMetCondition());
|
||||
|
||||
WatchStatus.State watchState = watchStatus.state();
|
||||
assertEquals(expectedActive, watchState.isActive());
|
||||
assertEquals(DateTime.parse("2015-05-26T18:04:27.723Z"), watchState.getTimestamp());
|
||||
|
||||
ActionStatus actionStatus = watchStatus.actionStatus("test_index");
|
||||
assertNotNull(actionStatus);
|
||||
|
||||
ActionStatus.AckStatus ackStatus = actionStatus.ackStatus();
|
||||
assertEquals(DateTime.parse("2015-05-26T18:04:27.763Z"), ackStatus.timestamp());
|
||||
assertEquals(expectedAckState, ackStatus.state());
|
||||
|
||||
ActionStatus.Execution lastExecution = actionStatus.lastExecution();
|
||||
assertEquals(DateTime.parse("2015-05-25T18:04:27.733Z"), lastExecution.timestamp());
|
||||
assertFalse(lastExecution.successful());
|
||||
assertEquals("failed to send email", lastExecution.reason());
|
||||
|
||||
ActionStatus.Execution lastSuccessfulExecution = actionStatus.lastSuccessfulExecution();
|
||||
assertEquals(DateTime.parse("2015-05-25T18:04:27.773Z"), lastSuccessfulExecution.timestamp());
|
||||
assertTrue(lastSuccessfulExecution.successful());
|
||||
assertNull(lastSuccessfulExecution.reason());
|
||||
|
||||
ActionStatus.Throttle lastThrottle = actionStatus.lastThrottle();
|
||||
assertEquals(DateTime.parse("2015-04-25T18:05:23.445Z"), lastThrottle.timestamp());
|
||||
assertEquals("throttling interval is set to [5 seconds] ...", lastThrottle.reason());
|
||||
}
|
||||
|
||||
public void testParsingWithUnknownKeys() throws IOException {
|
||||
int expectedVersion = randomIntBetween(0, 100);
|
||||
ExecutionState expectedExecutionState = randomFrom(ExecutionState.values());
|
||||
boolean expectedActive = randomBoolean();
|
||||
ActionStatus.AckStatus.State expectedAckState = randomFrom(ActionStatus.AckStatus.State.values());
|
||||
|
||||
XContentBuilder builder = createTestXContent(expectedVersion, expectedExecutionState,
|
||||
expectedActive, expectedAckState);
|
||||
BytesReference bytes = BytesReference.bytes(builder);
|
||||
|
||||
Predicate<String> excludeFilter = field -> field.equals("actions");
|
||||
BytesReference bytesWithRandomFields = XContentTestUtils.insertRandomFields(
|
||||
builder.contentType(), bytes, excludeFilter, random());
|
||||
|
||||
WatchStatus watchStatus = parse(builder.contentType(), bytesWithRandomFields);
|
||||
|
||||
assertEquals(expectedVersion, watchStatus.version());
|
||||
assertEquals(expectedExecutionState, watchStatus.getExecutionState());
|
||||
}
|
||||
|
||||
public void testOptionalFieldsParsing() throws IOException {
|
||||
XContentType contentType = randomFrom(XContentType.values());
|
||||
XContentBuilder builder = XContentFactory.contentBuilder(contentType).startObject()
|
||||
.field("version", 42)
|
||||
.startObject("actions")
|
||||
.startObject("test_index")
|
||||
.startObject("ack")
|
||||
.field("timestamp", "2015-05-26T18:04:27.763Z")
|
||||
.field("state", "ackable")
|
||||
.endObject()
|
||||
.startObject("last_execution")
|
||||
.field("timestamp", "2015-05-25T18:04:27.733Z")
|
||||
.field("successful", false)
|
||||
.field("reason", "failed to send email")
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject();
|
||||
BytesReference bytes = BytesReference.bytes(builder);
|
||||
|
||||
WatchStatus watchStatus = parse(builder.contentType(), bytes);
|
||||
|
||||
assertEquals(42, watchStatus.version());
|
||||
assertNull(watchStatus.getExecutionState());
|
||||
assertFalse(watchStatus.checked());
|
||||
}
|
||||
|
||||
private XContentBuilder createTestXContent(int version,
|
||||
ExecutionState executionState,
|
||||
boolean active,
|
||||
ActionStatus.AckStatus.State ackState) throws IOException {
|
||||
XContentType contentType = randomFrom(XContentType.values());
|
||||
return XContentFactory.contentBuilder(contentType).startObject()
|
||||
.field("version", version)
|
||||
.field("execution_state", executionState)
|
||||
.field("last_checked", 1432663467763L)
|
||||
.field("last_met_condition", "2015-05-26T18:04:27.763Z")
|
||||
.startObject("state")
|
||||
.field("active", active)
|
||||
.field("timestamp", "2015-05-26T18:04:27.723Z")
|
||||
.endObject()
|
||||
.startObject("actions")
|
||||
.startObject("test_index")
|
||||
.startObject("ack")
|
||||
.field("timestamp", "2015-05-26T18:04:27.763Z")
|
||||
.field("state", ackState)
|
||||
.endObject()
|
||||
.startObject("last_execution")
|
||||
.field("timestamp", "2015-05-25T18:04:27.733Z")
|
||||
.field("successful", false)
|
||||
.field("reason", "failed to send email")
|
||||
.endObject()
|
||||
.startObject("last_successful_execution")
|
||||
.field("timestamp", "2015-05-25T18:04:27.773Z")
|
||||
.field("successful", true)
|
||||
.endObject()
|
||||
.startObject("last_throttle")
|
||||
.field("timestamp", "2015-04-25T18:05:23.445Z")
|
||||
.field("reason", "throttling interval is set to [5 seconds] ...")
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject()
|
||||
.endObject();
|
||||
}
|
||||
|
||||
private WatchStatus parse(XContentType contentType, BytesReference bytes) throws IOException {
|
||||
XContentParser parser = XContentFactory.xContent(contentType)
|
||||
.createParser(NamedXContentRegistry.EMPTY, null, bytes.streamInput());
|
||||
parser.nextToken();
|
||||
|
||||
return WatchStatus.parse(parser);
|
||||
}
|
||||
}
|
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.test.rest;
|
||||
|
||||
import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
|
||||
import org.hamcrest.Matcher;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.BufferedReader;
|
||||
@ -29,9 +30,16 @@ import java.nio.file.Path;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
|
||||
@Override
|
||||
protected BufferedReader openReader(Path logFile) throws IOException {
|
||||
protected Matcher<String> nodeNameMatcher() {
|
||||
return is("node-0");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BufferedReader openReader(Path logFile) {
|
||||
return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
|
||||
try {
|
||||
return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);
|
||||
|
@ -37,6 +37,14 @@
|
||||
-XX:CMSInitiatingOccupancyFraction=75
|
||||
-XX:+UseCMSInitiatingOccupancyOnly
|
||||
|
||||
## G1GC Configuration
|
||||
# NOTE: G1GC is only supported on JDK version 10 or later.
|
||||
# To use G1GC uncomment the lines below.
|
||||
# 10-:-XX:-UseConcMarkSweepGC
|
||||
# 10-:-XX:-UseCMSInitiatingOccupancyOnly
|
||||
# 10-:-XX:+UseG1GC
|
||||
# 10-:-XX:InitiatingHeapOccupancyPercent=75
|
||||
|
||||
## optimizations
|
||||
|
||||
# pre-touch memory pages used by the JVM during initialization
|
||||
|
@ -0,0 +1,60 @@
|
||||
[[java-rest-high-document-reindex-rethrottle]]
|
||||
=== Reindex Rethrottle API
|
||||
|
||||
[[java-rest-high-document-reindex-rethrottle-request]]
|
||||
==== Reindex Rethrottle Request

A `RethrottleRequest` can be used to change the existing throttling on a running
reindex task or to disable it entirely. It requires the task ID of the reindex
task to change.
|
||||
|
||||
In its simplest form, you can use it to disable throttling of a running
|
||||
reindex task using the following:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-disable-request]
|
||||
--------------------------------------------------
|
||||
<1> Create a `RethrottleRequest` that disables throttling for a specific task id
|
||||
|
||||
By providing a `requestsPerSecond` argument, the request will change the
|
||||
existing task throttling to the specified value:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request]
|
||||
--------------------------------------------------
|
||||
<1> Request to change the throttling of a task to 100 requests per second
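For orientation, a minimal hand-written sketch of both variants follows. It is not taken from the
test sources; the node and task identifiers are placeholders that you would replace with the values
of the task you want to rethrottle.

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RethrottleRequest;
import org.elasticsearch.tasks.TaskId;

TaskId taskId = new TaskId("nodeId", 123L);                           // placeholder task id
RethrottleRequest disable = new RethrottleRequest(taskId);            // removes any throttling from the task
RethrottleRequest throttle = new RethrottleRequest(taskId, 100.0f);   // throttles the task to 100 requests per second
--------------------------------------------------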
|
||||
|
||||
[[java-rest-high-document-reindex-rethrottle-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a rethrottle request requires both the `RethrottleRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The RethrottleRequest to execute and the ActionListener to use when the
|
||||
execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately.
|
||||
Once it is completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed. A typical listener looks like this:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/CRUDDocumentationIT.java[rethrottle-request-async]
|
||||
--------------------------------------------------
|
||||
<1> Code executed when the request is successfully completed
|
||||
<2> Code executed when the request fails with an exception
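For reference, a hand-written listener along these lines is one possible shape; the callback bodies
are placeholders and are entirely up to the application.

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;

ActionListener<ListTasksResponse> listener = new ActionListener<ListTasksResponse>() {
    @Override
    public void onResponse(ListTasksResponse response) {
        // inspect the rethrottled task here, for example via response.getTasks()
    }

    @Override
    public void onFailure(Exception e) {
        // react to the failure, for example by logging it
    }
};
--------------------------------------------------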
|
||||
|
||||
[[java-rest-high-document-reindex-retrottle-response]]
|
||||
==== Rethrottle Response
|
||||
|
||||
Rethrottling returns the task that has been rethrottled in the form of a
|
||||
`ListTasksResponse`. The structure of this response object is described in detail
|
||||
in <<java-rest-high-cluster-list-tasks-response,this section>>.
|
docs/java-rest/high-level/ml/start-datafeed.asciidoc
@ -0,0 +1,71 @@
|
||||
[[java-rest-high-x-pack-ml-start-datafeed]]
|
||||
=== Start Datafeed API
|
||||
|
||||
The Start Datafeed API provides the ability to start a {ml} datafeed in the cluster.
|
||||
It accepts a `StartDatafeedRequest` object and responds
|
||||
with a `StartDatafeedResponse` object.
|
||||
|
||||
[[java-rest-high-x-pack-ml-start-datafeed-request]]
|
||||
==== Start Datafeed Request
|
||||
|
||||
A `StartDatafeedRequest` object is created referencing a non-null `datafeedId`.
|
||||
All other fields are optional for the request.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-start-datafeed-request]
|
||||
--------------------------------------------------
|
||||
<1> Constructing a new request referencing an existing `datafeedId`
|
||||
|
||||
==== Optional Arguments
|
||||
|
||||
The following arguments are optional.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-start-datafeed-request-options]
|
||||
--------------------------------------------------
|
||||
<1> Set when the datafeed should end; the value is exclusive.
It may be specified as epoch seconds, epoch milliseconds, or an ISO 8601 string.
"now" is a special value that indicates the current time.
If you do not specify an end time, the datafeed runs continuously.
<2> Set when the datafeed should start; the value is inclusive.
It may be specified as epoch seconds, epoch milliseconds, or an ISO 8601 string.
If you do not specify a start time and the datafeed is associated with a new job,
the analysis starts from the earliest time for which data is available.
<3> Set the timeout for the request.
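As an illustration, a request with every optional argument set could be assembled as in the sketch
below. The datafeed id and the time values are placeholders, and the setter names are assumed from
the option callouts above rather than copied from the snippet sources.

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.ml.StartDatafeedRequest;
import org.elasticsearch.common.unit.TimeValue;

StartDatafeedRequest request = new StartDatafeedRequest("my-datafeed"); // placeholder datafeed id
request.setEnd("now");                               // exclusive end time; "now" means the current time
request.setStart("2018-08-20T00:00:00Z");            // inclusive start time, here as an ISO 8601 string
request.setTimeout(TimeValue.timeValueSeconds(30));  // how long to wait for the datafeed to start
--------------------------------------------------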
|
||||
|
||||
[[java-rest-high-x-pack-ml-start-datafeed-execution]]
|
||||
==== Execution
|
||||
|
||||
The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-start-datafeed-execute]
|
||||
--------------------------------------------------
|
||||
<1> Did the datafeed successfully start?
|
||||
|
||||
[[java-rest-high-x-pack-ml-start-datafeed-execution-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The request can also be executed asynchronously:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-start-datafeed-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `StartDatafeedRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The method does not block and returns immediately. The passed `ActionListener` is used
|
||||
to notify the caller of completion. A typical `ActionListener` for `StartDatafeedResponse` may
|
||||
look like the following:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-start-datafeed-listener]
|
||||
--------------------------------------------------
|
||||
<1> `onResponse` is called back when the action is completed successfully
|
||||
<2> `onFailure` is called back when some unexpected error occurs
|
docs/java-rest/high-level/ml/stop-datafeed.asciidoc
@ -0,0 +1,66 @@
|
||||
[[java-rest-high-x-pack-ml-stop-datafeed]]
|
||||
=== Stop Datafeed API
|
||||
|
||||
The Stop Datafeed API provides the ability to stop a {ml} datafeed in the cluster.
|
||||
It accepts a `StopDatafeedRequest` object and responds
|
||||
with a `StopDatafeedResponse` object.
|
||||
|
||||
[[java-rest-high-x-pack-ml-stop-datafeed-request]]
|
||||
==== Stop Datafeed Request
|
||||
|
||||
A `StopDatafeedRequest` object is created referencing any number of non-null `datafeedId` entries.
|
||||
Wildcards and `_all` are also accepted.
|
||||
All other fields are optional for the request.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-stop-datafeed-request]
|
||||
--------------------------------------------------
|
||||
<1> Constructing a new request referencing existing `datafeedId` entries.
|
||||
|
||||
==== Optional Arguments
|
||||
|
||||
The following arguments are optional.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-stop-datafeed-request-options]
|
||||
--------------------------------------------------
|
||||
<1> Whether to ignore if a wildcard expression matches no datafeeds. (This includes the `_all` string.)
|
||||
<2> If true, the datafeed is stopped forcefully.
|
||||
<3> Controls the amount of time to wait until a datafeed stops. The default value is 20 seconds.
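To make the options concrete, the sketch below builds a request that sets each of them. The datafeed
ids are placeholders; the setters mirror the ones exercised by `StopDatafeedRequestTests` earlier in
this change.

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.ml.StopDatafeedRequest;
import org.elasticsearch.common.unit.TimeValue;

StopDatafeedRequest request = new StopDatafeedRequest("datafeed1", "datafeed2"); // placeholder ids
request.setAllowNoDatafeeds(true);                   // do not fail if a wildcard matches no datafeeds
request.setForce(false);                             // stop gracefully rather than forcefully
request.setTimeout(TimeValue.timeValueSeconds(30));  // wait up to 30 seconds for the datafeeds to stop
--------------------------------------------------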
|
||||
|
||||
[[java-rest-high-x-pack-ml-stop-datafeed-execution]]
|
||||
==== Execution
|
||||
|
||||
The request can be executed through the `MachineLearningClient` contained
in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-stop-datafeed-execute]
|
||||
--------------------------------------------------
|
||||
<1> Did the datafeed successfully stop?
|
||||
|
||||
[[java-rest-high-x-pack-ml-stop-datafeed-execution-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The request can also be executed asynchronously:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-stop-datafeed-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `StopDatafeedRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The method does not block and returns immediately. The passed `ActionListener` is used
|
||||
to notify the caller of completion. A typical `ActionListener` for `StopDatafeedResponse` may
|
||||
look like the following:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-stop-datafeed-listener]
|
||||
--------------------------------------------------
|
||||
<1> `onResponse` is called back when the action is completed successfully
|
||||
<2> `onFailure` is called back when some unexpected error occurs
|
@ -18,6 +18,7 @@ Multi-document APIs::
|
||||
* <<java-rest-high-document-reindex>>
|
||||
* <<java-rest-high-document-update-by-query>>
|
||||
* <<java-rest-high-document-delete-by-query>>
|
||||
* <<java-rest-high-document-reindex-rethrottle>>
|
||||
|
||||
include::document/index.asciidoc[]
|
||||
include::document/get.asciidoc[]
|
||||
@ -29,6 +30,7 @@ include::document/multi-get.asciidoc[]
|
||||
include::document/reindex.asciidoc[]
|
||||
include::document/update-by-query.asciidoc[]
|
||||
include::document/delete-by-query.asciidoc[]
|
||||
include::document/reindex-rethrottle.asciidoc[]
|
||||
|
||||
== Search APIs
|
||||
|
||||
@ -223,6 +225,8 @@ The Java High Level REST Client supports the following Machine Learning APIs:
|
||||
* <<java-rest-high-x-pack-ml-put-datafeed>>
|
||||
* <<java-rest-high-x-pack-ml-get-datafeed>>
|
||||
* <<java-rest-high-x-pack-ml-delete-datafeed>>
|
||||
* <<java-rest-high-x-pack-ml-start-datafeed>>
|
||||
* <<java-rest-high-x-pack-ml-stop-datafeed>>
|
||||
* <<java-rest-high-x-pack-ml-forecast-job>>
|
||||
* <<java-rest-high-x-pack-ml-delete-forecast>>
|
||||
* <<java-rest-high-x-pack-ml-get-buckets>>
|
||||
@ -245,6 +249,8 @@ include::ml/flush-job.asciidoc[]
|
||||
include::ml/put-datafeed.asciidoc[]
|
||||
include::ml/get-datafeed.asciidoc[]
|
||||
include::ml/delete-datafeed.asciidoc[]
|
||||
include::ml/start-datafeed.asciidoc[]
|
||||
include::ml/stop-datafeed.asciidoc[]
|
||||
include::ml/get-job-stats.asciidoc[]
|
||||
include::ml/forecast-job.asciidoc[]
|
||||
include::ml/delete-forecast.asciidoc[]
|
||||
|
@ -236,29 +236,36 @@ If everything goes well with installation, you should see a bunch of messages th
|
||||
|
||||
["source","sh",subs="attributes,callouts"]
|
||||
--------------------------------------------------
|
||||
[2016-09-16T14:17:51,251][INFO ][o.e.n.Node ] [] initializing ...
|
||||
[2016-09-16T14:17:51,329][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] using [1] data paths, mounts [[/ (/dev/sda1)]], net usable_space [317.7gb], net total_space [453.6gb], spins? [no], types [ext4]
|
||||
[2016-09-16T14:17:51,330][INFO ][o.e.e.NodeEnvironment ] [6-bjhwl] heap size [1.9gb], compressed ordinary object pointers [true]
|
||||
[2016-09-16T14:17:51,333][INFO ][o.e.n.Node ] [6-bjhwl] node name [6-bjhwl] derived from node ID; set [node.name] to override
|
||||
[2016-09-16T14:17:51,334][INFO ][o.e.n.Node ] [6-bjhwl] version[{version}], pid[21261], build[f5daa16/2016-09-16T09:12:24.346Z], OS[Linux/4.4.0-36-generic/amd64], JVM[Oracle Corporation/Java HotSpot(TM) 64-Bit Server VM/1.8.0_60/25.60-b23]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [aggs-matrix-stats]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [ingest-common]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-expression]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-mustache]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [lang-painless]
|
||||
[2016-09-16T14:17:51,967][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [percolator]
|
||||
[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [reindex]
|
||||
[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [transport-netty3]
|
||||
[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded module [transport-netty4]
|
||||
[2016-09-16T14:17:51,968][INFO ][o.e.p.PluginsService ] [6-bjhwl] loaded plugin [mapper-murmur3]
|
||||
[2016-09-16T14:17:53,521][INFO ][o.e.n.Node ] [6-bjhwl] initialized
|
||||
[2016-09-16T14:17:53,521][INFO ][o.e.n.Node ] [6-bjhwl] starting ...
|
||||
[2016-09-16T14:17:53,671][INFO ][o.e.t.TransportService ] [6-bjhwl] publish_address {192.168.8.112:9300}, bound_addresses {{192.168.8.112:9300}
|
||||
[2016-09-16T14:17:53,676][WARN ][o.e.b.BootstrapCheck ] [6-bjhwl] max virtual memory areas vm.max_map_count [65530] likely too low, increase to at least [262144]
|
||||
[2016-09-16T14:17:56,718][INFO ][o.e.c.s.ClusterService ] [6-bjhwl] new_master {6-bjhwl}{6-bjhwl4TkajjoD2oEipnQ}{8m3SNKoFR6yQl1I0JUfPig}{192.168.8.112}{192.168.8.112:9300}, reason: zen-disco-elected-as-master ([0] nodes joined)
|
||||
[2016-09-16T14:17:56,731][INFO ][o.e.h.HttpServer ] [6-bjhwl] publish_address {192.168.8.112:9200}, bound_addresses {[::1]:9200}, {192.168.8.112:9200}
|
||||
[2016-09-16T14:17:56,732][INFO ][o.e.g.GatewayService ] [6-bjhwl] recovered [0] indices into cluster_state
|
||||
[2016-09-16T14:17:56,748][INFO ][o.e.n.Node ] [6-bjhwl] started
|
||||
[2018-09-13T12:20:01,766][INFO ][o.e.e.NodeEnvironment ] [localhost.localdomain] using [1] data paths, mounts [[/home (/dev/mapper/fedora-home)]], net usable_space [335.3gb], net total_space [410.3gb], types [ext4]
|
||||
[2018-09-13T12:20:01,772][INFO ][o.e.e.NodeEnvironment ] [localhost.localdomain] heap size [990.7mb], compressed ordinary object pointers [true]
|
||||
[2018-09-13T12:20:01,774][INFO ][o.e.n.Node ] [localhost.localdomain] node name [localhost.localdomain], node ID [B0aEHNagTiWx7SYj-l4NTw]
|
||||
[2018-09-13T12:20:01,775][INFO ][o.e.n.Node ] [localhost.localdomain] version[{version}], pid[13030], build[oss/zip/77fc20e/2018-09-13T15:37:57.478402Z], OS[Linux/4.16.11-100.fc26.x86_64/amd64], JVM["Oracle Corporation"/OpenJDK 64-Bit Server VM/10/10+46]
|
||||
[2018-09-13T12:20:01,775][INFO ][o.e.n.Node ] [localhost.localdomain] JVM arguments [-Xms1g, -Xmx1g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -XX:-OmitStackTraceInFastThrow, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Djava.io.tmpdir=/tmp/elasticsearch.LN1ctLCi, -XX:+HeapDumpOnOutOfMemoryError, -XX:HeapDumpPath=data, -XX:ErrorFile=logs/hs_err_pid%p.log, -Xlog:gc*,gc+age=trace,safepoint:file=logs/gc.log:utctime,pid,tags:filecount=32,filesize=64m, -Djava.locale.providers=COMPAT, -XX:UseAVX=2, -Dio.netty.allocator.type=unpooled, -Des.path.home=/home/manybubbles/Workspaces/Elastic/master/elasticsearch/qa/unconfigured-node-name/build/cluster/integTestCluster node0/elasticsearch-7.0.0-alpha1-SNAPSHOT, -Des.path.conf=/home/manybubbles/Workspaces/Elastic/master/elasticsearch/qa/unconfigured-node-name/build/cluster/integTestCluster node0/elasticsearch-7.0.0-alpha1-SNAPSHOT/config, -Des.distribution.flavor=oss, -Des.distribution.type=zip]
|
||||
[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [aggs-matrix-stats]
|
||||
[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [analysis-common]
|
||||
[2018-09-13T12:20:02,543][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [ingest-common]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-expression]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-mustache]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [lang-painless]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [mapper-extras]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [parent-join]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [percolator]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [rank-eval]
|
||||
[2018-09-13T12:20:02,544][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [reindex]
|
||||
[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [repository-url]
|
||||
[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] loaded module [transport-netty4]
|
||||
[2018-09-13T12:20:02,545][INFO ][o.e.p.PluginsService ] [localhost.localdomain] no plugins loaded
|
||||
[2018-09-13T12:20:04,657][INFO ][o.e.d.DiscoveryModule ] [localhost.localdomain] using discovery type [zen]
|
||||
[2018-09-13T12:20:05,006][INFO ][o.e.n.Node ] [localhost.localdomain] initialized
|
||||
[2018-09-13T12:20:05,007][INFO ][o.e.n.Node ] [localhost.localdomain] starting ...
|
||||
[2018-09-13T12:20:05,202][INFO ][o.e.t.TransportService ] [localhost.localdomain] publish_address {127.0.0.1:9300}, bound_addresses {[::1]:9300}, {127.0.0.1:9300}
|
||||
[2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max file descriptors [4096] for elasticsearch process is too low, increase to at least [65536]
|
||||
[2018-09-13T12:20:05,221][WARN ][o.e.b.BootstrapChecks ] [localhost.localdomain] max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
|
||||
[2018-09-13T12:20:08,355][INFO ][o.e.c.s.MasterService ] [localhost.localdomain] zen-disco-elected-as-master ([0] nodes joined)[, ], reason: master node changed {previous [], current [{localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test}]}
|
||||
[2018-09-13T12:20:08,360][INFO ][o.e.c.s.ClusterApplierService] [localhost.localdomain] master node changed {previous [], current [{localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test}]}, reason: apply cluster state (from master [master {localhost.localdomain}{B0aEHNagTiWx7SYj-l4NTw}{hzsQz6CVQMCTpMCVLM4IHg}{127.0.0.1}{127.0.0.1:9300}{testattr=test} committed version [1] source [zen-disco-elected-as-master ([0] nodes joined)[, ]]])
|
||||
[2018-09-13T12:20:08,384][INFO ][o.e.h.n.Netty4HttpServerTransport] [localhost.localdomain] publish_address {127.0.0.1:9200}, bound_addresses {[::1]:9200}, {127.0.0.1:9200}
|
||||
[2018-09-13T12:20:08,384][INFO ][o.e.n.Node ] [localhost.localdomain] started
|
||||
|
||||
--------------------------------------------------
|
||||
|
||||
Without going into too much detail, we can see that our node named "6-bjhwl" (which will be a different set of characters in your case) has started and elected itself as a master in a single cluster. Don't worry for now about what master means. The main thing that is important here is that we have started one node within one cluster.
|
||||
|
@ -15,7 +15,7 @@ use the <<search-request-scroll,Scroll>> API.
|
||||
[[maximum-document-size]]
|
||||
=== Avoid large documents
|
||||
|
||||
Given that the default <<modules-http,`http.max_context_length`>> is set to
|
||||
Given that the default <<modules-http,`http.max_content_length`>> is set to
|
||||
100MB, Elasticsearch will refuse to index any document that is larger than
|
||||
that. You might decide to increase that particular setting, but Lucene still
|
||||
has a limit of about 2GB.
|
||||
|
@ -18,3 +18,7 @@ appropriate request directly.
|
||||
* All classes present in `org.elasticsearch.search.aggregations.metrics.*` packages
|
||||
were moved to a single `org.elasticsearch.search.aggregations.metrics` package.
|
||||
|
||||
==== `Retry.withBackoff` methods with `Settings` removed
|
||||
|
||||
The variants of `Retry.withBackoff` that included `Settings` have been removed
|
||||
because `Settings` is no longer needed.
|
@ -2,6 +2,12 @@
|
||||
|
||||
=== Settings changes
|
||||
|
||||
==== The default for `node.name` is now the hostname
|
||||
|
||||
`node.name` now defaults to the hostname at the time when Elasticsearch
|
||||
is started. Previously the default node name was the first eight characters
|
||||
of the node id. It can still be configured explicitly in `elasticsearch.yml`.
|
||||
|
||||
==== Percolator
|
||||
|
||||
* The deprecated `index.percolator.map_unmapped_fields_as_string` setting has been removed in favour of
|
||||
|
docs/reference/ml/apis/find-file-structure.asciidoc
@ -0,0 +1,489 @@
|
||||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[ml-find-file-structure]]
|
||||
=== Find File Structure API
|
||||
++++
|
||||
<titleabbrev>Find File Structure</titleabbrev>
|
||||
++++
|
||||
|
||||
experimental[]
|
||||
|
||||
Finds the structure of a text file. The text file must contain data that is
|
||||
suitable to be ingested into {es}.
|
||||
|
||||
==== Request
|
||||
|
||||
`POST _xpack/ml/find_file_structure`
|
||||
|
||||
|
||||
==== Description
|
||||
|
||||
This API provides a starting point for ingesting data into {es} in a format that
|
||||
is suitable for subsequent use with other {ml} functionality.
|
||||
|
||||
Unlike other {es} endpoints, the data that is posted to this endpoint does not
|
||||
need to be UTF-8 encoded and in JSON format. It must, however, be text; binary
|
||||
file formats are not currently supported.
|
||||
|
||||
The response from the API contains:
|
||||
|
||||
* A couple of messages from the beginning of the file.
|
||||
* Statistics that reveal the most common values for all fields detected within
|
||||
the file and basic numeric statistics for numeric fields.
|
||||
* Information about the structure of the file, which is useful when you write
|
||||
ingest configurations to index the file contents.
|
||||
* Appropriate mappings for an {es} index, which you could use to ingest the file
|
||||
contents.
|
||||
|
||||
All this information can be calculated by the structure finder with no guidance.
|
||||
However, you can optionally override some of the decisions about the file
|
||||
structure by specifying one or more query parameters.
|
||||
|
||||
Details of the output can be seen in the
|
||||
<<ml-find-file-structure-examples,examples>>.
|
||||
|
||||
If the structure finder produces unexpected results for a particular file,
|
||||
specify the `explain` query parameter. It causes an `explanation` to appear in
|
||||
the response, which should help in determining why the returned structure was
|
||||
chosen.
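Because the endpoint accepts raw text rather than a JSON document, one convenient way to call it is
through the low-level Java REST client. The sketch below assumes a cluster reachable on
`localhost:9200`; the query parameter values, the sample body, and the class name are illustrative
only.

["source","java"]
--------------------------------------------------
import java.io.IOException;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class FindFileStructureExample {                        // hypothetical example class
    public static void main(String[] args) throws IOException {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("POST", "/_xpack/ml/find_file_structure");
            request.addParameter("lines_to_sample", "500");    // analyse at most 500 lines
            request.addParameter("explain", "true");           // include the explanation array in the response
            // a couple of newline-delimited JSON lines; a real call would send the whole sample file
            request.setJsonEntity("{\"name\": \"Leviathan Wakes\", \"page_count\": 561}\n");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());      // the response body holds the detected structure
        }
    }
}
--------------------------------------------------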
|
||||
|
||||
==== Query Parameters
|
||||
|
||||
`charset`::
|
||||
(string) The file's character set. It must be a character set that is supported
|
||||
by the JVM that {es} uses. For example, `UTF-8`, `UTF-16LE`, `windows-1252`, or
|
||||
`EUC-JP`. If this parameter is not specified, the structure finder chooses an
|
||||
appropriate character set.
|
||||
|
||||
`column_names`::
|
||||
(string) If you have set `format` to `delimited`, you can specify the column names
|
||||
in a comma-separated list. If this parameter is not specified, the structure
|
||||
finder uses the column names from the header row of the file. If the file does
|
||||
not have a header row, the columns are named "column1", "column2", "column3", etc.
|
||||
|
||||
`delimiter`::
|
||||
(string) If you have set `format` to `delimited`, you can specify the character used
|
||||
to delimit the values in each row. Only a single character is supported; the
|
||||
delimiter cannot have multiple characters. If this parameter is not specified,
|
||||
the structure finder considers the following possibilities: comma, tab,
|
||||
semi-colon, and pipe (`|`).
|
||||
|
||||
`explain`::
|
||||
(boolean) If this parameter is set to `true`, the response includes a field
|
||||
named `explanation`, which is an array of strings that indicate how the
|
||||
structure finder produced its result. The default value is `false`.
|
||||
|
||||
`format`::
|
||||
(string) The high level structure of the file. Valid values are `json`, `xml`,
|
||||
`delimited`, and `semi_structured_text`. If this parameter is not specified,
|
||||
the structure finder chooses one.
|
||||
|
||||
`grok_pattern`::
|
||||
(string) If you have set `format` to `semi_structured_text`, you can specify a Grok
|
||||
pattern that is used to extract fields from every message in the file. The
|
||||
name of the timestamp field in the Grok pattern must match what is specified
|
||||
in the `timestamp_field` parameter. If that parameter is not specified, the
|
||||
name of the timestamp field in the Grok pattern must match "timestamp". If
|
||||
`grok_pattern` is not specified, the structure finder creates a Grok pattern.
|
||||
|
||||
`has_header_row`::
|
||||
(boolean) If you have set `format` to `delimited`, you can use this parameter to
|
||||
indicate whether the column names are in the first row of the file. If this
|
||||
parameter is not specified, the structure finder guesses based on the similarity of
|
||||
the first row of the file to other rows.
|
||||
|
||||
`lines_to_sample`::
|
||||
(unsigned integer) The number of lines to include in the structural analysis,
|
||||
starting from the beginning of the file. The minimum is 2; the default
|
||||
is 1000. If the value of this parameter is greater than the number of lines in
|
||||
the file, the analysis proceeds (as long as there are at least two lines in the
|
||||
file) for all of the lines. +
|
||||
+
|
||||
--
|
||||
NOTE: The number of lines and the variation of the lines affects the speed of
|
||||
the analysis. For example, if you upload a log file where the first 1000 lines
|
||||
are all variations on the same message, the analysis will find more commonality
|
||||
than would be seen with a bigger sample. If possible, however, it is more
|
||||
efficient to upload a sample file with more variety in the first 1000 lines than
|
||||
to request analysis of 100000 lines to achieve some variety.
|
||||
--
|
||||
|
||||
`quote`::
|
||||
(string) If you have set `format` to `delimited`, you can specify the character used
|
||||
to quote the values in each row if they contain newlines or the delimiter
|
||||
character. Only a single character is supported. If this parameter is not
|
||||
specified, the default value is a double quote (`"`). If your delimited file
|
||||
format does not use quoting, a workaround is to set this argument to a
|
||||
character that does not appear anywhere in the sample.
|
||||
|
||||
`should_trim_fields`::
|
||||
(boolean) If you have set `format` to `delimited`, you can specify whether values
|
||||
between delimiters should have whitespace trimmed from them. If this parameter
|
||||
is not specified and the delimiter is pipe (`|`), the default value is `true`.
|
||||
Otherwise, the default value is `false`.
|
||||
|
||||
`timestamp_field`::
|
||||
(string) The name of the field that contains the primary timestamp of each
|
||||
record in the file. In particular, if the file were ingested into an index,
|
||||
this is the field that would be used to populate the `@timestamp` field. +
|
||||
+
|
||||
--
|
||||
If the `format` is `semi_structured_text`, this field must match the name of the
|
||||
appropriate extraction in the `grok_pattern`. Therefore, for semi-structured
|
||||
file formats, it is best not to specify this parameter unless `grok_pattern` is
|
||||
also specified.
|
||||
|
||||
For structured file formats, if you specify this parameter, the field must exist
|
||||
within the file.
|
||||
|
||||
If this parameter is not specified, the structure finder makes a decision about which
|
||||
field (if any) is the primary timestamp field. For structured file formats, it
|
||||
is not compulsory to have a timestamp in the file.
|
||||
--
|
||||
|
||||
`timestamp_format`::
|
||||
(string) The time format of the timestamp field in the file. +
|
||||
+
|
||||
--
|
||||
NOTE: Currently there is a limitation that this format must be one that the
|
||||
structure finder might choose by itself. The reason for this restriction is that
|
||||
to consistently set all the fields in the response the structure finder needs a
|
||||
corresponding Grok pattern name and simple regular expression for each timestamp
|
||||
format. Therefore, there is little value in specifying this parameter for
|
||||
structured file formats. If you know which field contains your primary timestamp,
|
||||
it is just as good, and less error-prone, to specify `timestamp_field`.
|
||||
|
||||
The valuable use case for this parameter is when the format is semi-structured
|
||||
text, there are multiple timestamp formats in the file, and you know which
|
||||
format corresponds to the primary timestamp, but you do not want to specify the
|
||||
full `grok_pattern`.
|
||||
|
||||
If this parameter is not specified, the structure finder chooses the best format from
|
||||
the formats it knows, which are:
|
||||
|
||||
* `dd/MMM/YYYY:HH:mm:ss Z`
|
||||
* `EEE MMM dd HH:mm zzz YYYY`
|
||||
* `EEE MMM dd HH:mm:ss YYYY`
|
||||
* `EEE MMM dd HH:mm:ss zzz YYYY`
|
||||
* `EEE MMM dd YYYY HH:mm zzz`
|
||||
* `EEE MMM dd YYYY HH:mm:ss zzz`
|
||||
* `EEE, dd MMM YYYY HH:mm Z`
|
||||
* `EEE, dd MMM YYYY HH:mm ZZ`
|
||||
* `EEE, dd MMM YYYY HH:mm:ss Z`
|
||||
* `EEE, dd MMM YYYY HH:mm:ss ZZ`
|
||||
* `ISO8601`
|
||||
* `MMM d HH:mm:ss`
|
||||
* `MMM d HH:mm:ss,SSS`
|
||||
* `MMM d YYYY HH:mm:ss`
|
||||
* `MMM dd HH:mm:ss`
|
||||
* `MMM dd HH:mm:ss,SSS`
|
||||
* `MMM dd YYYY HH:mm:ss`
|
||||
* `MMM dd, YYYY K:mm:ss a`
|
||||
* `TAI64N`
|
||||
* `UNIX`
|
||||
* `UNIX_MS`
|
||||
* `YYYY-MM-dd HH:mm:ss`
|
||||
* `YYYY-MM-dd HH:mm:ss,SSS`
|
||||
* `YYYY-MM-dd HH:mm:ss,SSS Z`
|
||||
* `YYYY-MM-dd HH:mm:ss,SSSZ`
|
||||
* `YYYY-MM-dd HH:mm:ss,SSSZZ`
|
||||
* `YYYY-MM-dd HH:mm:ssZ`
|
||||
* `YYYY-MM-dd HH:mm:ssZZ`
|
||||
* `YYYYMMddHHmmss`
|
||||
|
||||
--
|
||||
|
||||
==== Request Body
|
||||
|
||||
The text file that you want to analyze. It must contain data that is suitable to
|
||||
be ingested into {es}. It does not need to be in JSON format and it does not
|
||||
need to be UTF-8 encoded. The size is limited to the {es} HTTP receive buffer
|
||||
size, which defaults to 100 MB.
|
||||
|
||||
|
||||
==== Authorization
|
||||
|
||||
You must have `monitor_ml` or `monitor` cluster privileges to use this API.
|
||||
For more information, see {stack-ov}/security-privileges.html[Security Privileges].
|
||||
|
||||
|
||||
[[ml-find-file-structure-examples]]
|
||||
==== Examples
|
||||
|
||||
Suppose you have a newline-delimited JSON file that contains information about
|
||||
some books. You can send the contents to the `find_file_structure` endpoint:
|
||||
|
||||
[source,js]
|
||||
----
|
||||
POST _xpack/ml/find_file_structure
|
||||
{"name": "Leviathan Wakes", "author": "James S.A. Corey", "release_date": "2011-06-02", "page_count": 561}
|
||||
{"name": "Hyperion", "author": "Dan Simmons", "release_date": "1989-05-26", "page_count": 482}
|
||||
{"name": "Dune", "author": "Frank Herbert", "release_date": "1965-06-01", "page_count": 604}
|
||||
{"name": "Dune Messiah", "author": "Frank Herbert", "release_date": "1969-10-15", "page_count": 331}
|
||||
{"name": "Children of Dune", "author": "Frank Herbert", "release_date": "1976-04-21", "page_count": 408}
|
||||
{"name": "God Emperor of Dune", "author": "Frank Herbert", "release_date": "1981-05-28", "page_count": 454}
|
||||
{"name": "Consider Phlebas", "author": "Iain M. Banks", "release_date": "1987-04-23", "page_count": 471}
|
||||
{"name": "Pandora's Star", "author": "Peter F. Hamilton", "release_date": "2004-03-02", "page_count": 768}
|
||||
{"name": "Revelation Space", "author": "Alastair Reynolds", "release_date": "2000-03-15", "page_count": 585}
|
||||
{"name": "A Fire Upon the Deep", "author": "Vernor Vinge", "release_date": "1992-06-01", "page_count": 613}
|
||||
{"name": "Ender's Game", "author": "Orson Scott Card", "release_date": "1985-06-01", "page_count": 324}
|
||||
{"name": "1984", "author": "George Orwell", "release_date": "1985-06-01", "page_count": 328}
|
||||
{"name": "Fahrenheit 451", "author": "Ray Bradbury", "release_date": "1953-10-15", "page_count": 227}
|
||||
{"name": "Brave New World", "author": "Aldous Huxley", "release_date": "1932-06-01", "page_count": 268}
|
||||
{"name": "Foundation", "author": "Isaac Asimov", "release_date": "1951-06-01", "page_count": 224}
|
||||
{"name": "The Giver", "author": "Lois Lowry", "release_date": "1993-04-26", "page_count": 208}
|
||||
{"name": "Slaughterhouse-Five", "author": "Kurt Vonnegut", "release_date": "1969-06-01", "page_count": 275}
|
||||
{"name": "The Hitchhiker's Guide to the Galaxy", "author": "Douglas Adams", "release_date": "1979-10-12", "page_count": 180}
|
||||
{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470}
|
||||
{"name": "Neuromancer", "author": "William Gibson", "release_date": "1984-07-01", "page_count": 271}
|
||||
{"name": "The Handmaid's Tale", "author": "Margaret Atwood", "release_date": "1985-06-01", "page_count": 311}
|
||||
{"name": "Starship Troopers", "author": "Robert A. Heinlein", "release_date": "1959-12-01", "page_count": 335}
|
||||
{"name": "The Left Hand of Darkness", "author": "Ursula K. Le Guin", "release_date": "1969-06-01", "page_count": 304}
|
||||
{"name": "The Moon is a Harsh Mistress", "author": "Robert A. Heinlein", "release_date": "1966-04-01", "page_count": 288}
|
||||
----
|
||||
// CONSOLE
|
||||
// TEST
|
||||
|
||||
If the request does not encounter errors, you receive the following result:
|
||||
[source,js]
|
||||
----
|
||||
{
|
||||
"num_lines_analyzed" : 24, <1>
|
||||
"num_messages_analyzed" : 24, <2>
|
||||
"sample_start" : "{\"name\": \"Leviathan Wakes\", \"author\": \"James S.A. Corey\", \"release_date\": \"2011-06-02\", \"page_count\": 561}\n{\"name\": \"Hyperion\", \"author\": \"Dan Simmons\", \"release_date\": \"1989-05-26\", \"page_count\": 482}\n", <3>
|
||||
"charset" : "UTF-8", <4>
|
||||
"has_byte_order_marker" : false, <5>
|
||||
"format" : "json", <6>
|
||||
"need_client_timezone" : false, <7>
|
||||
"mappings" : { <8>
|
||||
"author" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"name" : {
|
||||
"type" : "keyword"
|
||||
},
|
||||
"page_count" : {
|
||||
"type" : "long"
|
||||
},
|
||||
"release_date" : {
|
||||
"type" : "keyword"
|
||||
}
|
||||
},
|
||||
"field_stats" : { <9>
|
||||
"author" : {
|
||||
"count" : 24,
|
||||
"cardinality" : 20,
|
||||
"top_hits" : [
|
||||
{
|
||||
"value" : "Frank Herbert",
|
||||
"count" : 4
|
||||
},
|
||||
{
|
||||
"value" : "Robert A. Heinlein",
|
||||
"count" : 2
|
||||
},
|
||||
{
|
||||
"value" : "Alastair Reynolds",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Aldous Huxley",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Dan Simmons",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Douglas Adams",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "George Orwell",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Iain M. Banks",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Isaac Asimov",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "James S.A. Corey",
|
||||
"count" : 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"name" : {
|
||||
"count" : 24,
|
||||
"cardinality" : 24,
|
||||
"top_hits" : [
|
||||
{
|
||||
"value" : "1984",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "A Fire Upon the Deep",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Brave New World",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Children of Dune",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Consider Phlebas",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Dune",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Dune Messiah",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Ender's Game",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Fahrenheit 451",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "Foundation",
|
||||
"count" : 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"page_count" : {
|
||||
"count" : 24,
|
||||
"cardinality" : 24,
|
||||
"min_value" : 180,
|
||||
"max_value" : 768,
|
||||
"mean_value" : 387.0833333333333,
|
||||
"median_value" : 329.5,
|
||||
"top_hits" : [
|
||||
{
|
||||
"value" : 180,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 208,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 224,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 227,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 268,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 271,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 275,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 288,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 304,
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : 311,
|
||||
"count" : 1
|
||||
}
|
||||
]
|
||||
},
|
||||
"release_date" : {
|
||||
"count" : 24,
|
||||
"cardinality" : 20,
|
||||
"top_hits" : [
|
||||
{
|
||||
"value" : "1985-06-01",
|
||||
"count" : 3
|
||||
},
|
||||
{
|
||||
"value" : "1969-06-01",
|
||||
"count" : 2
|
||||
},
|
||||
{
|
||||
"value" : "1992-06-01",
|
||||
"count" : 2
|
||||
},
|
||||
{
|
||||
"value" : "1932-06-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1951-06-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1953-10-15",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1959-12-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1965-06-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1966-04-01",
|
||||
"count" : 1
|
||||
},
|
||||
{
|
||||
"value" : "1969-10-15",
|
||||
"count" : 1
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
----
|
||||
// TESTRESPONSE[s/"sample_start" : ".*",/"sample_start" : "$body.sample_start",/]
|
||||
// The substitution is because the "file" is pre-processed by the test harness,
|
||||
// so the fields may get reordered in the JSON the endpoint sees
|
||||
|
||||
<1> `num_lines_analyzed` indicates how many lines of the file were analyzed.
|
||||
<2> `num_messages_analyzed` indicates how many distinct messages the lines contained.
|
||||
For ND-JSON, this value is the same as `num_lines_analyzed`. For other file
|
||||
formats, messages can span several lines.
|
||||
<3> `sample_start` reproduces the first two messages in the file verbatim. This
|
||||
may help to diagnose parse errors or accidental uploads of the wrong file.
|
||||
<4> `charset` indicates the character encoding used to parse the file.
|
||||
<5> For UTF character encodings, `has_byte_order_marker` indicates whether the
|
||||
file begins with a byte order marker.
|
||||
<6> `format` is one of `json`, `xml`, `delimited` or `semi_structured_text`.
|
||||
<7> If a timestamp format is detected that does not include a timezone,
|
||||
`need_client_timezone` will be `true`. The server that parses the file must
|
||||
therefore be told the correct timezone by the client.
|
||||
<8> `mappings` contains some suitable mappings for an index into which the data
|
||||
could be ingested. In this case, the `release_date` field has been given a
|
||||
`keyword` type as it is not considered specific enough to convert to the
|
||||
`date` type.
|
||||
<9> `field_stats` contains the most common values of each field, plus basic
|
||||
numeric statistics for the numeric `page_count` field. This information
|
||||
may provide clues that the data needs to be cleaned or transformed prior
|
||||
to use by other {ml} functionality.
|
||||
|
@ -70,6 +70,12 @@ machine learning APIs and in advanced job configuration options in Kibana.
|
||||
* <<ml-get-influencer,Get influencers>>
|
||||
* <<ml-get-record,Get records>>
|
||||
|
||||
[float]
|
||||
[[ml-api-file-structure-endpoint]]
|
||||
=== File Structure
|
||||
|
||||
* <<ml-find-file-structure,Find file structure>>
|
||||
|
||||
//ADD
|
||||
include::post-calendar-event.asciidoc[]
|
||||
include::put-calendar-job.asciidoc[]
|
||||
@ -89,6 +95,8 @@ include::delete-forecast.asciidoc[]
|
||||
include::delete-job.asciidoc[]
|
||||
include::delete-calendar-job.asciidoc[]
|
||||
include::delete-snapshot.asciidoc[]
|
||||
//FIND
|
||||
include::find-file-structure.asciidoc[]
|
||||
//FLUSH
|
||||
include::flush-job.asciidoc[]
|
||||
//FORECAST
|
||||
@ -126,3 +134,4 @@ include::update-snapshot.asciidoc[]
|
||||
//VALIDATE
|
||||
//include::validate-detector.asciidoc[]
|
||||
//include::validate-job.asciidoc[]
|
||||
|
||||
|
@ -1,22 +1,13 @@
|
||||
[[node.name]]
|
||||
=== `node.name`
|
||||
|
||||
By default, Elasticsearch will use the first seven characters of the randomly
|
||||
generated UUID as the node id. Note that the node id is persisted and does
|
||||
not change when a node restarts and therefore the default node name will also
|
||||
not change.
|
||||
|
||||
It is worth configuring a more meaningful name which will also have the
|
||||
advantage of persisting after restarting the node:
|
||||
Elasticsearch uses `node.name` as a human readable identifier for a
|
||||
particular instance of Elasticsearch so it is included in the response
|
||||
of many APIs. It defaults to the hostname that the machine has when
|
||||
Elasticsearch starts but can be configured explicitly in
|
||||
`elasticsearch.yml` as follows:
|
||||
|
||||
[source,yaml]
|
||||
--------------------------------------------------
|
||||
node.name: prod-data-2
|
||||
--------------------------------------------------
|
||||
|
||||
The `node.name` can also be set to the server's HOSTNAME as follows:
|
||||
|
||||
[source,yaml]
|
||||
--------------------------------------------------
|
||||
node.name: ${HOSTNAME}
|
||||
--------------------------------------------------
|
||||
|
@ -4,10 +4,9 @@
|
||||
Elasticsearch uses https://logging.apache.org/log4j/2.x/[Log4j 2] for
|
||||
logging. Log4j 2 can be configured using the log4j2.properties
|
||||
file. Elasticsearch exposes three properties, `${sys:es.logs.base_path}`,
|
||||
`${sys:es.logs.cluster_name}`, and `${sys:es.logs.node_name}` (if the node name
|
||||
is explicitly set via `node.name`) that can be referenced in the configuration
|
||||
file to determine the location of the log files. The property
|
||||
`${sys:es.logs.base_path}` will resolve to the log directory,
|
||||
`${sys:es.logs.cluster_name}`, and `${sys:es.logs.node_name}` that can be
|
||||
referenced in the configuration file to determine the location of the log
|
||||
files. The property `${sys:es.logs.base_path}` will resolve to the log directory,
|
||||
`${sys:es.logs.cluster_name}` will resolve to the cluster name (used as the
|
||||
prefix of log filenames in the default configuration), and
|
||||
`${sys:es.logs.node_name}` will resolve to the node name (if the node name is
|
||||
|
@ -37,13 +37,19 @@ Same as `CEIL`

https://en.wikipedia.org/wiki/E_%28mathematical_constant%29[Euler's number], returns `2.7182818284590452354`

* https://en.wikipedia.org/wiki/Exponential_function[e^x^] (`EXP`)

* https://en.wikipedia.org/wiki/Rounding#Round_half_up[Round] (`ROUND`)
["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[exp]
--------------------------------------------------

// TODO make the example in the tests presentable

NOTE: This rounds "half up" meaning that `ROUND(-1.5)` results in `-1`.
* https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html#expm1-double-[e^x^ - 1] (`EXPM1`)

["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[expm1]
--------------------------------------------------

* https://en.wikipedia.org/wiki/Floor_and_ceiling_functions[Floor] (`FLOOR`)

@ -63,6 +69,36 @@ include-tagged::{sql-specs}/math.sql-spec[log]
include-tagged::{sql-specs}/math.sql-spec[log10]
--------------------------------------------------

* `ROUND`

.Synopsis:
[source, sql]
----
ROUND(numeric_exp<1>[, integer_exp<2>])
----
*Input*:

<1> numeric expression
<2> integer expression; optional

*Output*: numeric

.Description:
Returns `numeric_exp` rounded to `integer_exp` places right of the decimal point. If `integer_exp` is negative,
`numeric_exp` is rounded to |`integer_exp`| places to the left of the decimal point. If `integer_exp` is omitted,
the function will perform as if `integer_exp` would be 0. The returned numeric data type is the same as the data type
of `numeric_exp`.

["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[mathRoundWithPositiveParameter]
--------------------------------------------------

["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/docs.csv-spec[mathRoundWithNegativeParameter]
--------------------------------------------------

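To make the description above concrete, a few hand-worked values (plain arithmetic, not taken from the {sql-specs} files referenced by the snippets):

[source, sql]
----
ROUND(135.726, 2)  -- 135.73 (two places right of the decimal point)
ROUND(135.726, -2) -- 100    (two places left of the decimal point)
ROUND(-1.5)        -- -1     (integer_exp omitted; "half up" rounding)
----
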
* https://en.wikipedia.org/wiki/Square_root[Square root] (`SQRT`)

["source","sql",subs="attributes,callouts,macros"]
@ -70,18 +106,34 @@ include-tagged::{sql-specs}/math.sql-spec[log10]
include-tagged::{sql-specs}/math.sql-spec[sqrt]
--------------------------------------------------

* https://en.wikipedia.org/wiki/Exponential_function[e^x^] (`EXP`)
* `TRUNCATE`

.Synopsis:
[source, sql]
----
TRUNCATE(numeric_exp<1>[, integer_exp<2>])
----
*Input*:

<1> numeric expression
<2> integer expression; optional

*Output*: numeric

.Description:
Returns `numeric_exp` truncated to `integer_exp` places right of the decimal point. If `integer_exp` is negative,
`numeric_exp` is truncated to |`integer_exp`| places to the left of the decimal point. If `integer_exp` is omitted,
the function will perform as if `integer_exp` would be 0. The returned numeric data type is the same as the data type
of `numeric_exp`.

["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[exp]
include-tagged::{sql-specs}/docs.csv-spec[mathTruncateWithPositiveParameter]
--------------------------------------------------

* https://docs.oracle.com/javase/8/docs/api/java/lang/Math.html#expm1-double-[e^x^ - 1] (`EXPM1`)

["source","sql",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-specs}/math.sql-spec[expm1]
include-tagged::{sql-specs}/docs.csv-spec[mathTruncateWithNegativeParameter]
--------------------------------------------------

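Again a few hand-worked values, mainly to highlight the difference from `ROUND` (plain arithmetic, not taken from the test specs):

[source, sql]
----
TRUNCATE(135.726, 2)  -- 135.72 (digits are dropped, no rounding)
TRUNCATE(135.726, -2) -- 100
TRUNCATE(-1.5)        -- -1     (integer_exp omitted; the fraction is discarded)
----
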
==== Trigonometric

@ -126,7 +126,6 @@ import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory;
import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
import org.elasticsearch.index.analysis.PreConfiguredTokenizer;
import org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;

@ -16,13 +16,15 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
package org.elasticsearch.analysis.common;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;

/**
* Factory for {@link SoraniNormalizationFilter}
@ -24,7 +24,6 @@ import org.apache.lucene.analysis.en.PorterStemFilterFactory;
import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilterFactory;
import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory;
import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
import org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory;
import org.elasticsearch.index.analysis.SynonymTokenFilterFactory;
import org.elasticsearch.indices.analysis.AnalysisFactoryTestCase;

@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.grok.Grok;
import org.elasticsearch.grok.ThreadWatchdog;
import org.elasticsearch.ingest.PipelineProcessor;
import org.elasticsearch.ingest.Processor;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.IngestPlugin;

@ -110,4 +110,4 @@ teardown:
pipeline: "outer"
body: {}
- match: { error.root_cause.0.type: "exception" }
- match: { error.root_cause.0.reason: "java.lang.IllegalArgumentException: java.lang.IllegalStateException: Recursive invocation of pipeline [inner] detected." }
- match: { error.root_cause.0.reason: "java.lang.IllegalArgumentException: java.lang.IllegalStateException: Cycle detected for pipeline: inner" }

@ -605,3 +605,150 @@ teardown:
- length: { docs.0.processor_results.1: 2 }
- match: { docs.0.processor_results.1.tag: "rename-1" }
- match: { docs.0.processor_results.1.doc._source.new_status: 200 }

---
"Test verbose simulate with Pipeline Processor with Circular Pipelines":
- do:
ingest.put_pipeline:
id: "outer"
body: >
{
"description" : "outer pipeline",
"processors" : [
{
"pipeline" : {
"pipeline": "inner"
}
}
]
}
- match: { acknowledged: true }

- do:
ingest.put_pipeline:
id: "inner"
body: >
{
"description" : "inner pipeline",
"processors" : [
{
"pipeline" : {
"pipeline": "outer"
}
}
]
}
- match: { acknowledged: true }

- do:
catch: /illegal_state_exception/
ingest.simulate:
verbose: true
body: >
{
"pipeline": {
"processors" : [
{
"pipeline" : {
"pipeline": "outer"
}
}
]
}
,
"docs": [
{
"_index": "index",
"_type": "type",
"_id": "id",
"_source": {
"field1": "123.42 400 <foo>"
}
}
]
}
- match: { error.root_cause.0.type: "illegal_state_exception" }
- match: { error.root_cause.0.reason: "Cycle detected for pipeline: inner" }

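Outside the test harness, the same cycle check can be reproduced with a direct call to the simulate API; a rough sketch reusing the `outer`/`inner` pipelines registered above (request shape only, assuming both pipelines already exist on the node):

----
POST /_ingest/pipeline/_simulate?verbose=true
{
  "pipeline": { "processors": [ { "pipeline": { "pipeline": "outer" } } ] },
  "docs": [ { "_index": "index", "_type": "type", "_id": "id", "_source": { "field1": "123.42 400 <foo>" } } ]
}
----
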
---
"Test verbose simulate with Pipeline Processor with Multiple Pipelines":
- do:
ingest.put_pipeline:
id: "pipeline1"
body: >
{
"processors": [
{
"set": {
"field": "pipeline1",
"value": true
}
},
{
"pipeline": {
"pipeline": "pipeline2"
}
}
]
}
- match: { acknowledged: true }

- do:
ingest.put_pipeline:
id: "pipeline2"
body: >
{
"processors": [
{
"set": {
"field": "pipeline2",
"value": true
}
}
]
}
- match: { acknowledged: true }

- do:
ingest.simulate:
verbose: true
body: >
{
"pipeline": {
"processors": [
{
"set": {
"field": "pipeline0",
"value": true
}
},
{
"pipeline": {
"pipeline": "pipeline1"
}
}
]
},
"docs": [
{
"_index": "index",
"_type": "type",
"_id": "id",
"_source": {
"field1": "123.42 400 <foo>"
}
}
]
}
- length: { docs: 1 }
- length: { docs.0.processor_results: 3 }
- match: { docs.0.processor_results.0.doc._source.pipeline0: true }
- is_false: docs.0.processor_results.0.doc._source.pipeline1
- is_false: docs.0.processor_results.0.doc._source.pipeline2
- match: { docs.0.processor_results.1.doc._source.pipeline0: true }
- match: { docs.0.processor_results.1.doc._source.pipeline1: true }
- is_false: docs.0.processor_results.1.doc._source.pipeline2
- match: { docs.0.processor_results.2.doc._source.pipeline0: true }
- match: { docs.0.processor_results.2.doc._source.pipeline1: true }
- match: { docs.0.processor_results.2.doc._source.pipeline2: true }

@ -63,16 +63,16 @@ public final class Whitelist {
|
||||
/** The {@link List} of all the whitelisted static Painless methods. */
|
||||
public final List<WhitelistMethod> whitelistImportedMethods;
|
||||
|
||||
/** The {@link List} of all the whitelisted Painless bindings. */
|
||||
public final List<WhitelistBinding> whitelistBindings;
|
||||
/** The {@link List} of all the whitelisted Painless class bindings. */
|
||||
public final List<WhitelistClassBinding> whitelistClassBindings;
|
||||
|
||||
/** Standard constructor. All values must be not {@code null}. */
|
||||
public Whitelist(ClassLoader classLoader, List<WhitelistClass> whitelistClasses,
|
||||
List<WhitelistMethod> whitelistImportedMethods, List<WhitelistBinding> whitelistBindings) {
|
||||
List<WhitelistMethod> whitelistImportedMethods, List<WhitelistClassBinding> whitelistClassBindings) {
|
||||
|
||||
this.classLoader = Objects.requireNonNull(classLoader);
|
||||
this.whitelistClasses = Collections.unmodifiableList(Objects.requireNonNull(whitelistClasses));
|
||||
this.whitelistImportedMethods = Collections.unmodifiableList(Objects.requireNonNull(whitelistImportedMethods));
|
||||
this.whitelistBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistBindings));
|
||||
this.whitelistClassBindings = Collections.unmodifiableList(Objects.requireNonNull(whitelistClassBindings));
|
||||
}
|
||||
}
|
||||
|
@ -23,23 +23,23 @@ import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A binding represents a method call that stores state. Each binding class must have exactly one
|
||||
* public constructor and one public method excluding those inherited directly from {@link Object}.
|
||||
* The canonical type name parameters provided must match those of the constructor and method combined.
|
||||
* The constructor for a binding class will be called when the binding method is called for the first
|
||||
* time at which point state may be stored for the arguments passed into the constructor. The method
|
||||
* for a binding class will be called each time the binding method is called and may use the previously
|
||||
* stored state.
|
||||
* A class binding represents a method call that stores state. Each class binding's Java class must
|
||||
* have exactly one public constructor and one public method excluding those inherited directly
|
||||
* from {@link Object}. The canonical type name parameters provided must match those of the
|
||||
* constructor and method combined. The constructor for a class binding's Java class will be called
|
||||
* when the binding method is called for the first time at which point state may be stored for the
|
||||
* arguments passed into the constructor. The method for a binding class will be called each time
|
||||
* the binding method is called and may use the previously stored state.
|
||||
*/
|
||||
public class WhitelistBinding {
|
||||
public class WhitelistClassBinding {
|
||||
|
||||
/** Information about where this constructor was whitelisted from. */
|
||||
public final String origin;
|
||||
|
||||
/** The Java class name this binding represents. */
|
||||
/** The Java class name this class binding targets. */
|
||||
public final String targetJavaClassName;
|
||||
|
||||
/** The method name for this binding. */
|
||||
/** The method name for this class binding. */
|
||||
public final String methodName;
|
||||
|
||||
/**
|
||||
@ -54,7 +54,7 @@ public class WhitelistBinding {
|
||||
public final List<String> canonicalTypeNameParameters;
|
||||
|
||||
/** Standard constructor. All values must be not {@code null}. */
|
||||
public WhitelistBinding(String origin, String targetJavaClassName,
|
||||
public WhitelistClassBinding(String origin, String targetJavaClassName,
|
||||
String methodName, String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) {
|
||||
|
||||
this.origin = Objects.requireNonNull(origin);
|
@ -134,7 +134,7 @@ public final class WhitelistLoader {
|
||||
public static Whitelist loadFromResourceFiles(Class<?> resource, String... filepaths) {
|
||||
List<WhitelistClass> whitelistClasses = new ArrayList<>();
|
||||
List<WhitelistMethod> whitelistStatics = new ArrayList<>();
|
||||
List<WhitelistBinding> whitelistBindings = new ArrayList<>();
|
||||
List<WhitelistClassBinding> whitelistClassBindings = new ArrayList<>();
|
||||
|
||||
// Execute a single pass through the whitelist text files. This will gather all the
|
||||
// constructors, methods, augmented methods, and fields for each whitelisted class.
|
||||
@ -292,7 +292,7 @@ public final class WhitelistLoader {
|
||||
whitelistStatics.add(new WhitelistMethod(origin, targetJavaClassName,
|
||||
methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters)));
|
||||
} else if ("bound_to".equals(staticImportType)) {
|
||||
whitelistBindings.add(new WhitelistBinding(origin, targetJavaClassName,
|
||||
whitelistClassBindings.add(new WhitelistClassBinding(origin, targetJavaClassName,
|
||||
methodName, returnCanonicalTypeName, Arrays.asList(canonicalTypeNameParameters)));
|
||||
} else {
|
||||
throw new IllegalArgumentException("invalid static import definition: " +
|
||||
@ -392,7 +392,7 @@ public final class WhitelistLoader {
|
||||
|
||||
ClassLoader loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>)resource::getClassLoader);
|
||||
|
||||
return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistBindings);
|
||||
return new Whitelist(loader, whitelistClasses, whitelistStatics, whitelistClassBindings);
|
||||
}
|
||||
|
||||
private WhitelistLoader() {}
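For context on the `bound_to` branch parsed above: class bindings are declared inside a `static_import` block of a whitelist resource file. A minimal sketch of such an entry (the return type, method name, and class name are hypothetical, chosen only to illustrate the shape the loader expects):

----
# hypothetical entry; handled by the "bound_to" branch in WhitelistLoader
static_import {
  int exampleAddWithState(int, int) bound_to org.example.ExampleClassBinding
}
----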
|
||||
|
@ -347,17 +347,39 @@ public final class MethodWriter extends GeneratorAdapter {
|
||||
}
|
||||
|
||||
switch (operation) {
|
||||
case MUL: math(GeneratorAdapter.MUL, getType(clazz)); break;
|
||||
case DIV: math(GeneratorAdapter.DIV, getType(clazz)); break;
|
||||
case REM: math(GeneratorAdapter.REM, getType(clazz)); break;
|
||||
case ADD: math(GeneratorAdapter.ADD, getType(clazz)); break;
|
||||
case SUB: math(GeneratorAdapter.SUB, getType(clazz)); break;
|
||||
case LSH: math(GeneratorAdapter.SHL, getType(clazz)); break;
|
||||
case USH: math(GeneratorAdapter.USHR, getType(clazz)); break;
|
||||
case RSH: math(GeneratorAdapter.SHR, getType(clazz)); break;
|
||||
case BWAND: math(GeneratorAdapter.AND, getType(clazz)); break;
|
||||
case XOR: math(GeneratorAdapter.XOR, getType(clazz)); break;
|
||||
case BWOR: math(GeneratorAdapter.OR, getType(clazz)); break;
|
||||
case MUL:
|
||||
math(GeneratorAdapter.MUL, getType(clazz));
|
||||
break;
|
||||
case DIV:
|
||||
math(GeneratorAdapter.DIV, getType(clazz));
|
||||
break;
|
||||
case REM:
|
||||
math(GeneratorAdapter.REM, getType(clazz));
|
||||
break;
|
||||
case ADD:
|
||||
math(GeneratorAdapter.ADD, getType(clazz));
|
||||
break;
|
||||
case SUB:
|
||||
math(GeneratorAdapter.SUB, getType(clazz));
|
||||
break;
|
||||
case LSH:
|
||||
math(GeneratorAdapter.SHL, getType(clazz));
|
||||
break;
|
||||
case USH:
|
||||
math(GeneratorAdapter.USHR, getType(clazz));
|
||||
break;
|
||||
case RSH:
|
||||
math(GeneratorAdapter.SHR, getType(clazz));
|
||||
break;
|
||||
case BWAND:
|
||||
math(GeneratorAdapter.AND, getType(clazz));
|
||||
break;
|
||||
case XOR:
|
||||
math(GeneratorAdapter.XOR, getType(clazz));
|
||||
break;
|
||||
case BWOR:
|
||||
math(GeneratorAdapter.OR, getType(clazz));
|
||||
break;
|
||||
default:
|
||||
throw location.createError(new IllegalStateException("Illegal tree structure."));
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.List;
|
||||
|
||||
public class PainlessBinding {
|
||||
public class PainlessClassBinding {
|
||||
|
||||
public final Constructor<?> javaConstructor;
|
||||
public final Method javaMethod;
|
||||
@ -31,7 +31,7 @@ public class PainlessBinding {
|
||||
public final Class<?> returnType;
|
||||
public final List<Class<?>> typeParameters;
|
||||
|
||||
PainlessBinding(Constructor<?> javaConstructor, Method javaMethod, Class<?> returnType, List<Class<?>> typeParameters) {
|
||||
PainlessClassBinding(Constructor<?> javaConstructor, Method javaMethod, Class<?> returnType, List<Class<?>> typeParameters) {
|
||||
this.javaConstructor = javaConstructor;
|
||||
this.javaMethod = javaMethod;
|
||||
|
@ -38,23 +38,23 @@ public final class PainlessLookup {
|
||||
private final Map<Class<?>, PainlessClass> classesToPainlessClasses;
|
||||
|
||||
private final Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods;
|
||||
private final Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings;
|
||||
private final Map<String, PainlessClassBinding> painlessMethodKeysToPainlessClassBindings;
|
||||
|
||||
PainlessLookup(Map<String, Class<?>> canonicalClassNamesToClasses, Map<Class<?>, PainlessClass> classesToPainlessClasses,
|
||||
Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods,
|
||||
Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings) {
|
||||
Map<String, PainlessClassBinding> painlessMethodKeysToPainlessClassBindings) {
|
||||
|
||||
Objects.requireNonNull(canonicalClassNamesToClasses);
|
||||
Objects.requireNonNull(classesToPainlessClasses);
|
||||
|
||||
Objects.requireNonNull(painlessMethodKeysToImportedPainlessMethods);
|
||||
Objects.requireNonNull(painlessMethodKeysToPainlessBindings);
|
||||
Objects.requireNonNull(painlessMethodKeysToPainlessClassBindings);
|
||||
|
||||
this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses);
|
||||
this.classesToPainlessClasses = Collections.unmodifiableMap(classesToPainlessClasses);
|
||||
|
||||
this.painlessMethodKeysToImportedPainlessMethods = Collections.unmodifiableMap(painlessMethodKeysToImportedPainlessMethods);
|
||||
this.painlessMethodKeysToPainlessBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessBindings);
|
||||
this.painlessMethodKeysToPainlessClassBindings = Collections.unmodifiableMap(painlessMethodKeysToPainlessClassBindings);
|
||||
}
|
||||
|
||||
public boolean isValidCanonicalClassName(String canonicalClassName) {
|
||||
@ -182,12 +182,12 @@ public final class PainlessLookup {
|
||||
return painlessMethodKeysToImportedPainlessMethods.get(painlessMethodKey);
|
||||
}
|
||||
|
||||
public PainlessBinding lookupPainlessBinding(String methodName, int arity) {
|
||||
public PainlessClassBinding lookupPainlessClassBinding(String methodName, int arity) {
|
||||
Objects.requireNonNull(methodName);
|
||||
|
||||
String painlessMethodKey = buildPainlessMethodKey(methodName, arity);
|
||||
|
||||
return painlessMethodKeysToPainlessBindings.get(painlessMethodKey);
|
||||
return painlessMethodKeysToPainlessClassBindings.get(painlessMethodKey);
|
||||
}
|
||||
|
||||
public PainlessMethod lookupFunctionalInterfacePainlessMethod(Class<?> targetClass) {
|
||||
|
@ -20,7 +20,7 @@
|
||||
package org.elasticsearch.painless.lookup;
|
||||
|
||||
import org.elasticsearch.painless.spi.Whitelist;
|
||||
import org.elasticsearch.painless.spi.WhitelistBinding;
|
||||
import org.elasticsearch.painless.spi.WhitelistClassBinding;
|
||||
import org.elasticsearch.painless.spi.WhitelistClass;
|
||||
import org.elasticsearch.painless.spi.WhitelistConstructor;
|
||||
import org.elasticsearch.painless.spi.WhitelistField;
|
||||
@ -156,14 +156,14 @@ public final class PainlessLookupBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
private static class PainlessBindingCacheKey {
|
||||
private static class PainlessClassBindingCacheKey {
|
||||
|
||||
private final Class<?> targetClass;
|
||||
private final String methodName;
|
||||
private final Class<?> methodReturnType;
|
||||
private final List<Class<?>> methodTypeParameters;
|
||||
|
||||
private PainlessBindingCacheKey(Class<?> targetClass,
|
||||
private PainlessClassBindingCacheKey(Class<?> targetClass,
|
||||
String methodName, Class<?> returnType, List<Class<?>> typeParameters) {
|
||||
|
||||
this.targetClass = targetClass;
|
||||
@ -182,7 +182,7 @@ public final class PainlessLookupBuilder {
|
||||
return false;
|
||||
}
|
||||
|
||||
PainlessBindingCacheKey that = (PainlessBindingCacheKey)object;
|
||||
PainlessClassBindingCacheKey that = (PainlessClassBindingCacheKey)object;
|
||||
|
||||
return Objects.equals(targetClass, that.targetClass) &&
|
||||
Objects.equals(methodName, that.methodName) &&
|
||||
@ -196,10 +196,10 @@ public final class PainlessLookupBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
private static final Map<PainlessConstructorCacheKey, PainlessConstructor> painlessConstructorCache = new HashMap<>();
|
||||
private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>();
|
||||
private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>();
|
||||
private static final Map<PainlessBindingCacheKey, PainlessBinding> painlessBindingCache = new HashMap<>();
|
||||
private static final Map<PainlessConstructorCacheKey, PainlessConstructor> painlessConstructorCache = new HashMap<>();
|
||||
private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>();
|
||||
private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>();
|
||||
private static final Map<PainlessClassBindingCacheKey, PainlessClassBinding> painlessClassBindingCache = new HashMap<>();
|
||||
|
||||
private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$");
|
||||
private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$");
|
||||
@ -251,12 +251,12 @@ public final class PainlessLookupBuilder {
|
||||
whitelistStatic.canonicalTypeNameParameters);
|
||||
}
|
||||
|
||||
for (WhitelistBinding whitelistBinding : whitelist.whitelistBindings) {
|
||||
origin = whitelistBinding.origin;
|
||||
painlessLookupBuilder.addPainlessBinding(
|
||||
whitelist.classLoader, whitelistBinding.targetJavaClassName,
|
||||
whitelistBinding.methodName, whitelistBinding.returnCanonicalTypeName,
|
||||
whitelistBinding.canonicalTypeNameParameters);
|
||||
for (WhitelistClassBinding whitelistClassBinding : whitelist.whitelistClassBindings) {
|
||||
origin = whitelistClassBinding.origin;
|
||||
painlessLookupBuilder.addPainlessClassBinding(
|
||||
whitelist.classLoader, whitelistClassBinding.targetJavaClassName,
|
||||
whitelistClassBinding.methodName, whitelistClassBinding.returnCanonicalTypeName,
|
||||
whitelistClassBinding.canonicalTypeNameParameters);
|
||||
}
|
||||
}
|
||||
} catch (Exception exception) {
|
||||
@ -270,14 +270,14 @@ public final class PainlessLookupBuilder {
|
||||
private final Map<Class<?>, PainlessClassBuilder> classesToPainlessClassBuilders;
|
||||
|
||||
private final Map<String, PainlessMethod> painlessMethodKeysToImportedPainlessMethods;
|
||||
private final Map<String, PainlessBinding> painlessMethodKeysToPainlessBindings;
|
||||
private final Map<String, PainlessClassBinding> painlessMethodKeysToPainlessClassBindings;
|
||||
|
||||
public PainlessLookupBuilder() {
|
||||
canonicalClassNamesToClasses = new HashMap<>();
|
||||
classesToPainlessClassBuilders = new HashMap<>();
|
||||
|
||||
painlessMethodKeysToImportedPainlessMethods = new HashMap<>();
|
||||
painlessMethodKeysToPainlessBindings = new HashMap<>();
|
||||
painlessMethodKeysToPainlessClassBindings = new HashMap<>();
|
||||
}
|
||||
|
||||
private Class<?> canonicalTypeNameToType(String canonicalTypeName) {
|
||||
@ -909,8 +909,8 @@ public final class PainlessLookupBuilder {
|
||||
|
||||
String painlessMethodKey = buildPainlessMethodKey(methodName, typeParametersSize);
|
||||
|
||||
if (painlessMethodKeysToPainlessBindings.containsKey(painlessMethodKey)) {
|
||||
throw new IllegalArgumentException("imported method and binding cannot have the same name [" + methodName + "]");
|
||||
if (painlessMethodKeysToPainlessClassBindings.containsKey(painlessMethodKey)) {
|
||||
throw new IllegalArgumentException("imported method and class binding cannot have the same name [" + methodName + "]");
|
||||
}
|
||||
|
||||
PainlessMethod importedPainlessMethod = painlessMethodKeysToImportedPainlessMethods.get(painlessMethodKey);
|
||||
@ -945,7 +945,7 @@ public final class PainlessLookupBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
public void addPainlessBinding(ClassLoader classLoader, String targetJavaClassName,
|
||||
public void addPainlessClassBinding(ClassLoader classLoader, String targetJavaClassName,
|
||||
String methodName, String returnCanonicalTypeName, List<String> canonicalTypeNameParameters) {
|
||||
|
||||
Objects.requireNonNull(classLoader);
|
||||
@ -969,7 +969,7 @@ public final class PainlessLookupBuilder {
|
||||
Class<?> typeParameter = canonicalTypeNameToType(canonicalTypeNameParameter);
|
||||
|
||||
if (typeParameter == null) {
|
||||
throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for binding " +
|
||||
throw new IllegalArgumentException("type parameter [" + canonicalTypeNameParameter + "] not found for class binding " +
|
||||
"[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
|
||||
}
|
||||
|
||||
@ -979,14 +979,14 @@ public final class PainlessLookupBuilder {
|
||||
Class<?> returnType = canonicalTypeNameToType(returnCanonicalTypeName);
|
||||
|
||||
if (returnType == null) {
|
||||
throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for binding " +
|
||||
throw new IllegalArgumentException("return type [" + returnCanonicalTypeName + "] not found for class binding " +
|
||||
"[[" + targetCanonicalClassName + "], [" + methodName + "], " + canonicalTypeNameParameters + "]");
|
||||
}
|
||||
|
||||
addPainlessBinding(targetClass, methodName, returnType, typeParameters);
|
||||
addPainlessClassBinding(targetClass, methodName, returnType, typeParameters);
|
||||
}
|
||||
|
||||
public void addPainlessBinding(Class<?> targetClass, String methodName, Class<?> returnType, List<Class<?>> typeParameters) {
|
||||
public void addPainlessClassBinding(Class<?> targetClass, String methodName, Class<?> returnType, List<Class<?>> typeParameters) {
|
||||
|
||||
Objects.requireNonNull(targetClass);
|
||||
Objects.requireNonNull(methodName);
|
||||
@ -994,7 +994,7 @@ public final class PainlessLookupBuilder {
|
||||
Objects.requireNonNull(typeParameters);
|
||||
|
||||
if (targetClass == def.class) {
|
||||
throw new IllegalArgumentException("cannot add binding as reserved class [" + DEF_CLASS_NAME + "]");
|
||||
throw new IllegalArgumentException("cannot add class binding as reserved class [" + DEF_CLASS_NAME + "]");
|
||||
}
|
||||
|
||||
String targetCanonicalClassName = typeToCanonicalTypeName(targetClass);
|
||||
@ -1005,7 +1005,8 @@ public final class PainlessLookupBuilder {
|
||||
for (Constructor<?> eachJavaConstructor : javaConstructors) {
|
||||
if (eachJavaConstructor.getDeclaringClass() == targetClass) {
|
||||
if (javaConstructor != null) {
|
||||
throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple constructors");
|
||||
throw new IllegalArgumentException(
|
||||
"class binding [" + targetCanonicalClassName + "] cannot have multiple constructors");
|
||||
}
|
||||
|
||||
javaConstructor = eachJavaConstructor;
|
||||
@ -1013,7 +1014,7 @@ public final class PainlessLookupBuilder {
|
||||
}
|
||||
|
||||
if (javaConstructor == null) {
|
||||
throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one constructor");
|
||||
throw new IllegalArgumentException("class binding [" + targetCanonicalClassName + "] must have exactly one constructor");
|
||||
}
|
||||
|
||||
int constructorTypeParametersSize = javaConstructor.getParameterCount();
|
||||
@ -1023,26 +1024,26 @@ public final class PainlessLookupBuilder {
|
||||
|
||||
if (isValidType(typeParameter) == false) {
|
||||
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
|
||||
"for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
"for class binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
}
|
||||
|
||||
Class<?> javaTypeParameter = javaConstructor.getParameterTypes()[typeParameterIndex];
|
||||
|
||||
if (isValidType(javaTypeParameter) == false) {
|
||||
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
|
||||
"for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
"for class binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
}
|
||||
|
||||
if (javaTypeParameter != typeToJavaType(typeParameter)) {
|
||||
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " +
|
||||
"does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " +
|
||||
"for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
"for class binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
}
|
||||
}
|
||||
|
||||
if (METHOD_NAME_PATTERN.matcher(methodName).matches() == false) {
|
||||
throw new IllegalArgumentException(
|
||||
"invalid method name [" + methodName + "] for binding [" + targetCanonicalClassName + "].");
|
||||
"invalid method name [" + methodName + "] for class binding [" + targetCanonicalClassName + "].");
|
||||
}
|
||||
|
||||
Method[] javaMethods = targetClass.getMethods();
|
||||
@ -1051,7 +1052,7 @@ public final class PainlessLookupBuilder {
|
||||
for (Method eachJavaMethod : javaMethods) {
|
||||
if (eachJavaMethod.getDeclaringClass() == targetClass) {
|
||||
if (javaMethod != null) {
|
||||
throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] cannot have multiple methods");
|
||||
throw new IllegalArgumentException("class binding [" + targetCanonicalClassName + "] cannot have multiple methods");
|
||||
}
|
||||
|
||||
javaMethod = eachJavaMethod;
|
||||
@ -1059,7 +1060,7 @@ public final class PainlessLookupBuilder {
|
||||
}
|
||||
|
||||
if (javaMethod == null) {
|
||||
throw new IllegalArgumentException("binding [" + targetCanonicalClassName + "] must have exactly one method");
|
||||
throw new IllegalArgumentException("class binding [" + targetCanonicalClassName + "] must have exactly one method");
|
||||
}
|
||||
|
||||
int methodTypeParametersSize = javaMethod.getParameterCount();
|
||||
@ -1069,60 +1070,60 @@ public final class PainlessLookupBuilder {
|
||||
|
||||
if (isValidType(typeParameter) == false) {
|
||||
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
|
||||
"for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
"for class binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
}
|
||||
|
||||
Class<?> javaTypeParameter = javaMethod.getParameterTypes()[typeParameterIndex];
|
||||
|
||||
if (isValidType(javaTypeParameter) == false) {
|
||||
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(typeParameter) + "] not found " +
|
||||
"for binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
"for class binding [[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
}
|
||||
|
||||
if (javaTypeParameter != typeToJavaType(typeParameter)) {
|
||||
throw new IllegalArgumentException("type parameter [" + typeToCanonicalTypeName(javaTypeParameter) + "] " +
|
||||
"does not match the specified type parameter [" + typeToCanonicalTypeName(typeParameter) + "] " +
|
||||
"for binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
"for class binding [[" + targetClass.getCanonicalName() + "], " + typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
}
|
||||
}
|
||||
|
||||
if (javaMethod.getReturnType() != typeToJavaType(returnType)) {
|
||||
throw new IllegalArgumentException("return type [" + typeToCanonicalTypeName(javaMethod.getReturnType()) + "] " +
|
||||
"does not match the specified returned type [" + typeToCanonicalTypeName(returnType) + "] " +
|
||||
"for binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " +
|
||||
"for class binding [[" + targetClass.getCanonicalName() + "], [" + methodName + "], " +
|
||||
typesToCanonicalTypeNames(typeParameters) + "]");
|
||||
}
|
||||
|
||||
String painlessMethodKey = buildPainlessMethodKey(methodName, constructorTypeParametersSize + methodTypeParametersSize);
|
||||
|
||||
if (painlessMethodKeysToImportedPainlessMethods.containsKey(painlessMethodKey)) {
|
||||
throw new IllegalArgumentException("binding and imported method cannot have the same name [" + methodName + "]");
|
||||
throw new IllegalArgumentException("class binding and imported method cannot have the same name [" + methodName + "]");
|
||||
}
|
||||
|
||||
PainlessBinding painlessBinding = painlessMethodKeysToPainlessBindings.get(painlessMethodKey);
|
||||
PainlessClassBinding painlessClassBinding = painlessMethodKeysToPainlessClassBindings.get(painlessMethodKey);
|
||||
|
||||
if (painlessBinding == null) {
|
||||
if (painlessClassBinding == null) {
|
||||
Constructor<?> finalJavaConstructor = javaConstructor;
|
||||
Method finalJavaMethod = javaMethod;
|
||||
|
||||
painlessBinding = painlessBindingCache.computeIfAbsent(
|
||||
new PainlessBindingCacheKey(targetClass, methodName, returnType, typeParameters),
|
||||
key -> new PainlessBinding(finalJavaConstructor, finalJavaMethod, returnType, typeParameters));
|
||||
painlessClassBinding = painlessClassBindingCache.computeIfAbsent(
|
||||
new PainlessClassBindingCacheKey(targetClass, methodName, returnType, typeParameters),
|
||||
key -> new PainlessClassBinding(finalJavaConstructor, finalJavaMethod, returnType, typeParameters));
|
||||
|
||||
painlessMethodKeysToPainlessBindings.put(painlessMethodKey, painlessBinding);
|
||||
} else if (painlessBinding.javaConstructor.equals(javaConstructor) == false ||
|
||||
painlessBinding.javaMethod.equals(javaMethod) == false ||
|
||||
painlessBinding.returnType != returnType ||
|
||||
painlessBinding.typeParameters.equals(typeParameters) == false) {
|
||||
throw new IllegalArgumentException("cannot have bindings " +
|
||||
painlessMethodKeysToPainlessClassBindings.put(painlessMethodKey, painlessClassBinding);
|
||||
} else if (painlessClassBinding.javaConstructor.equals(javaConstructor) == false ||
|
||||
painlessClassBinding.javaMethod.equals(javaMethod) == false ||
|
||||
painlessClassBinding.returnType != returnType ||
|
||||
painlessClassBinding.typeParameters.equals(typeParameters) == false) {
|
||||
throw new IllegalArgumentException("cannot have class bindings " +
|
||||
"[[" + targetCanonicalClassName + "], " +
|
||||
"[" + methodName + "], " +
|
||||
"[" + typeToCanonicalTypeName(returnType) + "], " +
|
||||
typesToCanonicalTypeNames(typeParameters) + "] and " +
|
||||
"[[" + targetCanonicalClassName + "], " +
|
||||
"[" + methodName + "], " +
|
||||
"[" + typeToCanonicalTypeName(painlessBinding.returnType) + "], " +
|
||||
typesToCanonicalTypeNames(painlessBinding.typeParameters) + "] and " +
|
||||
"[" + typeToCanonicalTypeName(painlessClassBinding.returnType) + "], " +
|
||||
typesToCanonicalTypeNames(painlessClassBinding.typeParameters) + "] and " +
|
||||
"with the same name and arity but different constructors or methods");
|
||||
}
|
||||
}
|
||||
@ -1139,7 +1140,7 @@ public final class PainlessLookupBuilder {
|
||||
}
|
||||
|
||||
return new PainlessLookup(canonicalClassNamesToClasses, classesToPainlessClasses,
|
||||
painlessMethodKeysToImportedPainlessMethods, painlessMethodKeysToPainlessBindings);
|
||||
painlessMethodKeysToImportedPainlessMethods, painlessMethodKeysToPainlessClassBindings);
|
||||
}
|
||||
|
||||
private void copyPainlessClassMembers() {
|
||||
|
@ -24,7 +24,7 @@ import org.elasticsearch.painless.Locals;
|
||||
import org.elasticsearch.painless.Locals.LocalMethod;
|
||||
import org.elasticsearch.painless.Location;
|
||||
import org.elasticsearch.painless.MethodWriter;
|
||||
import org.elasticsearch.painless.lookup.PainlessBinding;
|
||||
import org.elasticsearch.painless.lookup.PainlessClassBinding;
|
||||
import org.elasticsearch.painless.lookup.PainlessMethod;
|
||||
import org.objectweb.asm.Label;
|
||||
import org.objectweb.asm.Type;
|
||||
@ -45,9 +45,9 @@ public final class ECallLocal extends AExpression {
|
||||
private final String name;
|
||||
private final List<AExpression> arguments;
|
||||
|
||||
private LocalMethod method = null;
|
||||
private PainlessMethod imported = null;
|
||||
private PainlessBinding binding = null;
|
||||
private LocalMethod localMethod = null;
|
||||
private PainlessMethod importedMethod = null;
|
||||
private PainlessClassBinding classBinding = null;
|
||||
|
||||
public ECallLocal(Location location, String name, List<AExpression> arguments) {
|
||||
super(location);
|
||||
@ -65,15 +65,15 @@ public final class ECallLocal extends AExpression {
|
||||
|
||||
@Override
|
||||
void analyze(Locals locals) {
|
||||
method = locals.getMethod(name, arguments.size());
|
||||
localMethod = locals.getMethod(name, arguments.size());
|
||||
|
||||
if (method == null) {
|
||||
imported = locals.getPainlessLookup().lookupImportedPainlessMethod(name, arguments.size());
|
||||
if (localMethod == null) {
|
||||
importedMethod = locals.getPainlessLookup().lookupImportedPainlessMethod(name, arguments.size());
|
||||
|
||||
if (imported == null) {
|
||||
binding = locals.getPainlessLookup().lookupPainlessBinding(name, arguments.size());
|
||||
if (importedMethod == null) {
|
||||
classBinding = locals.getPainlessLookup().lookupPainlessClassBinding(name, arguments.size());
|
||||
|
||||
if (binding == null) {
|
||||
if (classBinding == null) {
|
||||
throw createError(
|
||||
new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments."));
|
||||
}
|
||||
@ -82,15 +82,15 @@ public final class ECallLocal extends AExpression {
|
||||
|
||||
List<Class<?>> typeParameters;
|
||||
|
||||
if (method != null) {
|
||||
typeParameters = new ArrayList<>(method.typeParameters);
|
||||
actual = method.returnType;
|
||||
} else if (imported != null) {
|
||||
typeParameters = new ArrayList<>(imported.typeParameters);
|
||||
actual = imported.returnType;
|
||||
} else if (binding != null) {
|
||||
typeParameters = new ArrayList<>(binding.typeParameters);
|
||||
actual = binding.returnType;
|
||||
if (localMethod != null) {
|
||||
typeParameters = new ArrayList<>(localMethod.typeParameters);
|
||||
actual = localMethod.returnType;
|
||||
} else if (importedMethod != null) {
|
||||
typeParameters = new ArrayList<>(importedMethod.typeParameters);
|
||||
actual = importedMethod.returnType;
|
||||
} else if (classBinding != null) {
|
||||
typeParameters = new ArrayList<>(classBinding.typeParameters);
|
||||
actual = classBinding.returnType;
|
||||
} else {
|
||||
throw new IllegalStateException("Illegal tree structure.");
|
||||
}
|
||||
@ -111,23 +111,23 @@ public final class ECallLocal extends AExpression {
|
||||
void write(MethodWriter writer, Globals globals) {
|
||||
writer.writeDebugInfo(location);
|
||||
|
||||
if (method != null) {
|
||||
if (localMethod != null) {
|
||||
for (AExpression argument : arguments) {
|
||||
argument.write(writer, globals);
|
||||
}
|
||||
|
||||
writer.invokeStatic(CLASS_TYPE, new Method(method.name, method.methodType.toMethodDescriptorString()));
|
||||
} else if (imported != null) {
|
||||
writer.invokeStatic(CLASS_TYPE, new Method(localMethod.name, localMethod.methodType.toMethodDescriptorString()));
|
||||
} else if (importedMethod != null) {
|
||||
for (AExpression argument : arguments) {
|
||||
argument.write(writer, globals);
|
||||
}
|
||||
|
||||
writer.invokeStatic(Type.getType(imported.targetClass),
|
||||
new Method(imported.javaMethod.getName(), imported.methodType.toMethodDescriptorString()));
|
||||
} else if (binding != null) {
|
||||
String name = globals.addBinding(binding.javaConstructor.getDeclaringClass());
|
||||
Type type = Type.getType(binding.javaConstructor.getDeclaringClass());
|
||||
int javaConstructorParameterCount = binding.javaConstructor.getParameterCount();
|
||||
writer.invokeStatic(Type.getType(importedMethod.targetClass),
|
||||
new Method(importedMethod.javaMethod.getName(), importedMethod.methodType.toMethodDescriptorString()));
|
||||
} else if (classBinding != null) {
|
||||
String name = globals.addBinding(classBinding.javaConstructor.getDeclaringClass());
|
||||
Type type = Type.getType(classBinding.javaConstructor.getDeclaringClass());
|
||||
int javaConstructorParameterCount = classBinding.javaConstructor.getParameterCount();
|
||||
|
||||
Label nonNull = new Label();
|
||||
|
||||
@ -142,18 +142,18 @@ public final class ECallLocal extends AExpression {
|
||||
arguments.get(argument).write(writer, globals);
|
||||
}
|
||||
|
||||
writer.invokeConstructor(type, Method.getMethod(binding.javaConstructor));
|
||||
writer.invokeConstructor(type, Method.getMethod(classBinding.javaConstructor));
|
||||
writer.putField(CLASS_TYPE, name, type);
|
||||
|
||||
writer.mark(nonNull);
|
||||
writer.loadThis();
|
||||
writer.getField(CLASS_TYPE, name, type);
|
||||
|
||||
for (int argument = 0; argument < binding.javaMethod.getParameterCount(); ++argument) {
|
||||
for (int argument = 0; argument < classBinding.javaMethod.getParameterCount(); ++argument) {
|
||||
arguments.get(argument + javaConstructorParameterCount).write(writer, globals);
|
||||
}
|
||||
|
||||
writer.invokeVirtual(type, Method.getMethod(binding.javaMethod));
|
||||
writer.invokeVirtual(type, Method.getMethod(classBinding.javaMethod));
|
||||
} else {
|
||||
throw new IllegalStateException("Illegal tree structure.");
|
||||
}
|
||||
|
@ -417,18 +417,33 @@ public final class SSource extends AStatement {
|
||||
for (AStatement statement : statements) {
|
||||
statement.write(writer, globals);
|
||||
}
|
||||
|
||||
if (!methodEscape) {
|
||||
switch (scriptClassInfo.getExecuteMethod().getReturnType().getSort()) {
|
||||
case org.objectweb.asm.Type.VOID: break;
|
||||
case org.objectweb.asm.Type.BOOLEAN: writer.push(false); break;
|
||||
case org.objectweb.asm.Type.BYTE: writer.push(0); break;
|
||||
case org.objectweb.asm.Type.SHORT: writer.push(0); break;
|
||||
case org.objectweb.asm.Type.INT: writer.push(0); break;
|
||||
case org.objectweb.asm.Type.LONG: writer.push(0L); break;
|
||||
case org.objectweb.asm.Type.FLOAT: writer.push(0f); break;
|
||||
case org.objectweb.asm.Type.DOUBLE: writer.push(0d); break;
|
||||
default: writer.visitInsn(Opcodes.ACONST_NULL);
|
||||
case org.objectweb.asm.Type.VOID:
|
||||
break;
|
||||
case org.objectweb.asm.Type.BOOLEAN:
|
||||
writer.push(false);
|
||||
break;
|
||||
case org.objectweb.asm.Type.BYTE:
|
||||
writer.push(0);
|
||||
break;
|
||||
case org.objectweb.asm.Type.SHORT:
|
||||
writer.push(0);
|
||||
break;
|
||||
case org.objectweb.asm.Type.INT:
|
||||
writer.push(0);
|
||||
break;
|
||||
case org.objectweb.asm.Type.LONG:
|
||||
writer.push(0L);
|
||||
break;
|
||||
case org.objectweb.asm.Type.FLOAT:
|
||||
writer.push(0f);
|
||||
break;
|
||||
case org.objectweb.asm.Type.DOUBLE:
|
||||
writer.push(0d);
|
||||
break;
|
||||
default:
|
||||
writer.visitInsn(Opcodes.ACONST_NULL);
|
||||
}
|
||||
writer.returnValue();
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.queries.BlendedTermQuery;
|
||||
import org.apache.lucene.queries.CommonTermsQuery;
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.BooleanClause.Occur;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.BoostQuery;
|
||||
import org.apache.lucene.search.ConstantScoreQuery;
|
||||
@ -38,7 +39,6 @@ import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.SynonymQuery;
|
||||
import org.apache.lucene.search.TermInSetQuery;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.BooleanClause.Occur;
|
||||
import org.apache.lucene.search.spans.SpanFirstQuery;
|
||||
import org.apache.lucene.search.spans.SpanNearQuery;
|
||||
import org.apache.lucene.search.spans.SpanNotQuery;
|
||||
@ -489,43 +489,51 @@ final class QueryAnalyzer {
|
||||
return subResult;
|
||||
}
|
||||
}
|
||||
int msm = 0;
|
||||
boolean verified = true;
|
||||
boolean matchAllDocs = true;
|
||||
boolean hasDuplicateTerms = false;
Set<QueryExtraction> extractions = new HashSet<>();
|
||||
Set<String> seenRangeFields = new HashSet<>();
|
||||
for (Result result : conjunctions) {
|
||||
// In case that there are duplicate query extractions we need to be careful with incrementing msm,
|
||||
// because that could lead to valid matches not becoming candidate matches:
|
||||
// query: (field:val1 AND field:val2) AND (field:val2 AND field:val3)
|
||||
// doc: field: val1 val2 val3
|
||||
// So lets be protective and decrease the msm:
|
||||
int resultMsm = result.minimumShouldMatch;
|
||||
for (QueryExtraction queryExtraction : result.extractions) {
|
||||
if (queryExtraction.range != null) {
|
||||
// In case of range queries each extraction does not simply increment the minimum_should_match
|
||||
// for that percolator query like for a term based extraction, so that can lead to more false
|
||||
// positives for percolator queries with range queries than term based queries.
|
||||
// The is because the way number fields are extracted from the document to be percolated.
|
||||
// Per field a single range is extracted and if a percolator query has two or more range queries
|
||||
// on the same field, then the minimum should match can be higher than clauses in the CoveringQuery.
|
||||
// Therefore right now the minimum should match is incremented once per number field when processing
|
||||
// the percolator query at index time.
|
||||
if (seenRangeFields.add(queryExtraction.range.fieldName)) {
|
||||
resultMsm = 1;
|
||||
} else {
|
||||
resultMsm = 0;
|
||||
}
|
||||
}
|
||||
int msm = 0;
|
||||
boolean verified = true;
|
||||
boolean matchAllDocs = true;
|
||||
boolean hasDuplicateTerms = false;
|
||||
Set<QueryExtraction> extractions = new HashSet<>();
|
||||
Set<String> seenRangeFields = new HashSet<>();
|
||||
for (Result result : conjunctions) {
|
||||
// In case that there are duplicate query extractions we need to be careful with
|
||||
// incrementing msm,
|
||||
// because that could lead to valid matches not becoming candidate matches:
|
||||
// query: (field:val1 AND field:val2) AND (field:val2 AND field:val3)
|
||||
// doc: field: val1 val2 val3
|
||||
// So lets be protective and decrease the msm:
|
||||
int resultMsm = result.minimumShouldMatch;
|
||||
for (QueryExtraction queryExtraction : result.extractions) {
|
||||
if (queryExtraction.range != null) {
|
||||
// In case of range queries each extraction does not simply increment the
|
||||
// minimum_should_match
|
||||
// for that percolator query like for a term based extraction, so that can lead
|
||||
// to more false
|
||||
// positives for percolator queries with range queries than term based queries.
|
||||
// The is because the way number fields are extracted from the document to be
|
||||
// percolated.
|
||||
// Per field a single range is extracted and if a percolator query has two or
|
||||
// more range queries
|
||||
// on the same field, then the minimum should match can be higher than clauses
|
||||
// in the CoveringQuery.
|
||||
// Therefore right now the minimum should match is incremented once per number
|
||||
// field when processing
|
||||
// the percolator query at index time.
|
||||
if (seenRangeFields.add(queryExtraction.range.fieldName)) {
|
||||
resultMsm = 1;
|
||||
} else {
|
||||
resultMsm = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (extractions.contains(queryExtraction)) {
|
||||
if (extractions.contains(queryExtraction)) {
|
||||
|
||||
resultMsm = 0;
|
||||
verified = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
msm += resultMsm;
|
||||
resultMsm = 0;
|
||||
verified = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
msm += resultMsm;
|
||||
|
||||
if (result.verified == false
|
||||
// If some inner extractions are optional, the result can't be verified
|
||||
|
@ -36,7 +36,6 @@ import org.elasticsearch.action.delete.DeleteRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.client.ParentTaskAssigningClient;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
@ -104,7 +103,6 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
||||
private final ActionListener<BulkByScrollResponse> listener;
|
||||
private final Retry bulkRetry;
|
||||
private final ScrollableHitSource scrollSource;
|
||||
private final Settings settings;
|
||||
|
||||
/**
|
||||
* This BiFunction is used to apply various changes depending of the Reindex action and the search hit,
|
||||
@ -113,15 +111,9 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
||||
*/
|
||||
private final BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> scriptApplier;
|
||||
|
||||
public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
|
||||
ThreadPool threadPool, Request mainRequest, ScriptService scriptService,
|
||||
ClusterState clusterState, ActionListener<BulkByScrollResponse> listener) {
|
||||
this(task, logger, client, threadPool, mainRequest, scriptService, clusterState, listener, client.settings());
|
||||
}
|
||||
|
||||
public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
|
||||
ThreadPool threadPool, Request mainRequest, ScriptService scriptService, ClusterState clusterState,
|
||||
ActionListener<BulkByScrollResponse> listener, Settings settings) {
|
||||
ActionListener<BulkByScrollResponse> listener) {
|
||||
|
||||
this.task = task;
|
||||
if (!task.isWorker()) {
|
||||
@ -131,7 +123,6 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
||||
|
||||
this.logger = logger;
|
||||
this.client = client;
|
||||
this.settings = settings;
|
||||
this.threadPool = threadPool;
|
||||
this.scriptService = scriptService;
|
||||
this.clusterState = clusterState;
|
||||
@ -357,7 +348,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
||||
public void onFailure(Exception e) {
|
||||
finishHim(e);
|
||||
}
|
||||
}, settings);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -256,16 +256,10 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
|
||||
*/
|
||||
private List<Thread> createdThreads = emptyList();
|
||||
|
||||
AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
|
||||
ThreadPool threadPool, ReindexRequest request, ScriptService scriptService, ClusterState clusterState,
|
||||
ActionListener<BulkByScrollResponse> listener) {
|
||||
this(task, logger, client, threadPool, request, scriptService, clusterState, listener, client.settings());
|
||||
}
|
||||
|
||||
AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
|
||||
ThreadPool threadPool, ReindexRequest request, ScriptService scriptService, ClusterState clusterState,
|
||||
ActionListener<BulkByScrollResponse> listener, Settings settings) {
|
||||
super(task, logger, client, threadPool, request, scriptService, clusterState, listener, settings);
|
||||
ActionListener<BulkByScrollResponse> listener) {
|
||||
super(task, logger, client, threadPool, request, scriptService, clusterState, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -82,16 +82,10 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
|
||||
* Simple implementation of update-by-query using scrolling and bulk.
|
||||
*/
|
||||
static class AsyncIndexBySearchAction extends AbstractAsyncBulkByScrollAction<UpdateByQueryRequest> {
|
||||
AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
|
||||
ThreadPool threadPool, UpdateByQueryRequest request, ScriptService scriptService, ClusterState clusterState,
|
||||
ActionListener<BulkByScrollResponse> listener) {
|
||||
this(task, logger, client, threadPool, request, scriptService, clusterState, listener, client.settings());
|
||||
}
|
||||
|
||||
AsyncIndexBySearchAction(BulkByScrollTask task, Logger logger, ParentTaskAssigningClient client,
|
||||
ThreadPool threadPool, UpdateByQueryRequest request, ScriptService scriptService, ClusterState clusterState,
|
||||
ActionListener<BulkByScrollResponse> listener, Settings settings) {
|
||||
super(task, logger, client, threadPool, request, scriptService, clusterState, listener, settings);
|
||||
ActionListener<BulkByScrollResponse> listener) {
|
||||
super(task, logger, client, threadPool, request, scriptService, clusterState, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -672,7 +672,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
|
||||
private class DummyAsyncBulkByScrollAction extends AbstractAsyncBulkByScrollAction<DummyAbstractBulkByScrollRequest> {
|
||||
DummyAsyncBulkByScrollAction() {
|
||||
super(testTask, AsyncBulkByScrollActionTests.this.logger, new ParentTaskAssigningClient(client, localNode, testTask),
|
||||
client.threadPool(), testRequest, null, null, listener, Settings.EMPTY);
|
||||
client.threadPool(), testRequest, null, null, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.index.reindex.ScrollableHitSource.Hit;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
/**
|
||||
* Index-by-search test for ttl, timestamp, and routing.
|
||||
@ -78,7 +77,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkByScrollActionMetadat
|
||||
private class TestAction extends TransportReindexAction.AsyncIndexBySearchAction {
|
||||
TestAction() {
|
||||
super(ReindexMetadataTests.this.task, ReindexMetadataTests.this.logger, null, ReindexMetadataTests.this.threadPool, request(),
|
||||
null, null, listener(), Settings.EMPTY);
|
||||
null, null, listener());
|
||||
}
|
||||
|
||||
public ReindexRequest mainRequest() {
|
||||
|
@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
|
||||
import java.util.Map;
|
||||
@ -104,7 +103,7 @@ public class ReindexScriptTests extends AbstractAsyncBulkByScrollActionScriptTes
|
||||
|
||||
@Override
|
||||
protected TransportReindexAction.AsyncIndexBySearchAction action(ScriptService scriptService, ReindexRequest request) {
|
||||
return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService, null,
|
||||
listener(), Settings.EMPTY);
|
||||
return new TransportReindexAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService,
|
||||
null, listener());
|
||||
}
|
||||
}
|
||||
|
@ -188,7 +188,7 @@ public class RetryTests extends ESIntegTestCase {
}

Retry retry = new Retry(BackoffPolicy.exponentialBackoff(), client().threadPool());
BulkResponse initialBulkResponse = retry.withBackoff(client()::bulk, bulk.request(), client().settings()).actionGet();
BulkResponse initialBulkResponse = retry.withBackoff(client()::bulk, bulk.request()).actionGet();
assertFalse(initialBulkResponse.buildFailureMessage(), initialBulkResponse.hasFailures());
client().admin().indices().prepareRefresh("source").get();
@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.index.reindex.ScrollableHitSource.Hit;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
public class UpdateByQueryMetadataTests
|
||||
extends AbstractAsyncBulkByScrollActionMetadataTestCase<UpdateByQueryRequest, BulkByScrollResponse> {
|
||||
@ -44,8 +43,7 @@ public class UpdateByQueryMetadataTests
|
||||
private class TestAction extends TransportUpdateByQueryAction.AsyncIndexBySearchAction {
|
||||
TestAction() {
|
||||
super(UpdateByQueryMetadataTests.this.task, UpdateByQueryMetadataTests.this.logger, null,
|
||||
UpdateByQueryMetadataTests.this.threadPool, request(), null, null, listener(),
|
||||
Settings.EMPTY);
|
||||
UpdateByQueryMetadataTests.this.threadPool, request(), null, null, listener());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
|
||||
import java.util.Date;
|
||||
@ -54,7 +53,7 @@ public class UpdateByQueryWithScriptTests
|
||||
|
||||
@Override
|
||||
protected TransportUpdateByQueryAction.AsyncIndexBySearchAction action(ScriptService scriptService, UpdateByQueryRequest request) {
|
||||
return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService, null,
|
||||
listener(), Settings.EMPTY);
|
||||
return new TransportUpdateByQueryAction.AsyncIndexBySearchAction(task, logger, null, threadPool, request, scriptService,
|
||||
null, listener());
|
||||
}
|
||||
}
|
||||
|
@ -58,7 +58,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
|
||||
BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) {
|
||||
|
||||
@Override
|
||||
protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException,
|
||||
public Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException,
|
||||
InterruptedException {
|
||||
if (doHandshake) {
|
||||
return super.executeHandshake(node, channel, timeout);
|
||||
|
@ -30,7 +30,7 @@ dependencies {
|
||||
compile("com.fasterxml.jackson.core:jackson-databind:${versions.jackson}")
|
||||
compile('com.maxmind.db:maxmind-db:1.2.2')
|
||||
|
||||
testCompile 'org.elasticsearch:geolite2-databases:20180303'
|
||||
testCompile 'org.elasticsearch:geolite2-databases:20180911'
|
||||
}
|
||||
|
||||
task copyDefaultGeoIp2DatabaseFiles(type: Copy) {
|
||||
|
@ -134,8 +134,8 @@ public class GeoIpProcessorTests extends ESTestCase {
|
||||
assertThat(geoData.get("city_name"), equalTo("Hollywood"));
|
||||
assertThat(geoData.get("timezone"), equalTo("America/New_York"));
|
||||
Map<String, Object> location = new HashMap<>();
|
||||
location.put("lat", 26.0252d);
|
||||
location.put("lon", -80.296d);
|
||||
location.put("lat", 25.9825d);
|
||||
location.put("lon", -80.3434d);
|
||||
assertThat(geoData.get("location"), equalTo(location));
|
||||
}
|
||||
|
||||
@ -197,7 +197,7 @@ public class GeoIpProcessorTests extends ESTestCase {
|
||||
}
|
||||
|
||||
public void testAsn() throws Exception {
|
||||
String ip = "82.170.213.79";
|
||||
String ip = "82.171.64.0";
|
||||
InputStream database = getDatabaseFileInputStream("/GeoLite2-ASN.mmdb");
|
||||
GeoIpProcessor processor = new GeoIpProcessor(randomAlphaOfLength(10), "source_field",
|
||||
new DatabaseReader.Builder(database).build(), "target_field", EnumSet.allOf(GeoIpProcessor.Property.class), false,
|
||||
@ -213,7 +213,7 @@ public class GeoIpProcessorTests extends ESTestCase {
|
||||
Map<String, Object> geoData = (Map<String, Object>) ingestDocument.getSourceAndMetadata().get("target_field");
|
||||
assertThat(geoData.size(), equalTo(3));
|
||||
assertThat(geoData.get("ip"), equalTo(ip));
|
||||
assertThat(geoData.get("asn"), equalTo(5615));
|
||||
assertThat(geoData.get("asn"), equalTo(1136));
|
||||
assertThat(geoData.get("organization_name"), equalTo("KPN B.V."));
|
||||
}
|
||||
|
||||
|
@ -33,8 +33,8 @@
|
||||
- length: { _source.geoip: 6 }
|
||||
- match: { _source.geoip.city_name: "Minneapolis" }
|
||||
- match: { _source.geoip.country_iso_code: "US" }
|
||||
- match: { _source.geoip.location.lon: -93.2166 }
|
||||
- match: { _source.geoip.location.lat: 44.9759 }
|
||||
- match: { _source.geoip.location.lon: -93.2323 }
|
||||
- match: { _source.geoip.location.lat: 44.9733 }
|
||||
- match: { _source.geoip.region_iso_code: "US-MN" }
|
||||
- match: { _source.geoip.region_name: "Minnesota" }
|
||||
- match: { _source.geoip.continent_name: "North America" }
|
||||
@ -80,8 +80,8 @@
|
||||
- match: { _source.geoip.city_name: "Minneapolis" }
|
||||
- match: { _source.geoip.country_iso_code: "US" }
|
||||
- match: { _source.geoip.ip: "128.101.101.101" }
|
||||
- match: { _source.geoip.location.lon: -93.2166 }
|
||||
- match: { _source.geoip.location.lat: 44.9759 }
|
||||
- match: { _source.geoip.location.lon: -93.2323 }
|
||||
- match: { _source.geoip.location.lat: 44.9733 }
|
||||
- match: { _source.geoip.timezone: "America/Chicago" }
|
||||
- match: { _source.geoip.country_name: "United States" }
|
||||
- match: { _source.geoip.region_iso_code: "US-MN" }
|
||||
@ -193,8 +193,8 @@
|
||||
- length: { _source.geoip: 6 }
|
||||
- match: { _source.geoip.city_name: "Minneapolis" }
|
||||
- match: { _source.geoip.country_iso_code: "US" }
|
||||
- match: { _source.geoip.location.lon: -93.2166 }
|
||||
- match: { _source.geoip.location.lat: 44.9759 }
|
||||
- match: { _source.geoip.location.lon: -93.2323 }
|
||||
- match: { _source.geoip.location.lat: 44.9733 }
|
||||
- match: { _source.geoip.region_iso_code: "US-MN" }
|
||||
- match: { _source.geoip.region_name: "Minnesota" }
|
||||
- match: { _source.geoip.continent_name: "North America" }
|
||||
@ -224,15 +224,15 @@
|
||||
type: test
|
||||
id: 1
|
||||
pipeline: "my_pipeline"
|
||||
body: {field1: "82.170.213.79"}
|
||||
body: {field1: "82.171.64.0"}
|
||||
|
||||
- do:
|
||||
get:
|
||||
index: test
|
||||
type: test
|
||||
id: 1
|
||||
- match: { _source.field1: "82.170.213.79" }
|
||||
- match: { _source.field1: "82.171.64.0" }
|
||||
- length: { _source.geoip: 3 }
|
||||
- match: { _source.geoip.ip: "82.170.213.79" }
|
||||
- match: { _source.geoip.asn: 5615 }
|
||||
- match: { _source.geoip.ip: "82.171.64.0" }
|
||||
- match: { _source.geoip.asn: 1136 }
|
||||
- match: { _source.geoip.organization_name: "KPN B.V." }
|
||||
|
@ -295,7 +295,10 @@ public class AnnotatedTextFieldMapper extends FieldMapper {
StringBuilder sb = new StringBuilder();
sb.append(textMinusMarkup);
sb.append("\n");
annotations.forEach(a -> {sb.append(a); sb.append("\n");});
annotations.forEach(a -> {
sb.append(a);
sb.append("\n");
});
return sb.toString();
}
@ -4,8 +4,8 @@
|
||||
---
|
||||
"annotated highlighter on annotated text":
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: Annotated text type introduced in 7.0.0-alpha1
|
||||
version: " - 6.4.99"
|
||||
reason: Annotated text type introduced in 6.5.0
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
|
@ -70,12 +70,14 @@ bundlePlugin {
|
||||
|
||||
additionalTest('testRepositoryCreds'){
|
||||
include '**/RepositoryCredentialsTests.class'
|
||||
include '**/S3BlobStoreRepositoryTests.class'
|
||||
systemProperty 'es.allow_insecure_settings', 'true'
|
||||
}
|
||||
|
||||
test {
|
||||
// these are tested explicitly in separate test tasks
|
||||
exclude '**/*CredentialsTests.class'
|
||||
exclude '**/S3BlobStoreRepositoryTests.class'
|
||||
}
|
||||
|
||||
boolean useFixture = false
|
||||
|
@ -19,18 +19,12 @@
|
||||
|
||||
package org.elasticsearch.repositories.s3;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import com.amazonaws.ClientConfiguration;
|
||||
import com.amazonaws.Protocol;
|
||||
import com.amazonaws.auth.AWSCredentials;
|
||||
import com.amazonaws.auth.BasicAWSCredentials;
|
||||
|
||||
import com.amazonaws.auth.BasicSessionCredentials;
|
||||
import org.elasticsearch.common.collect.MapBuilder;
|
||||
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
|
||||
import org.elasticsearch.common.settings.SecureSetting;
|
||||
import org.elasticsearch.common.settings.SecureString;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
@ -38,6 +32,12 @@ import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* A container for settings used to create an S3 client.
|
||||
*/
|
||||
@ -160,19 +160,6 @@ final class S3ClientSettings {
|
||||
return Collections.unmodifiableMap(clients);
|
||||
}
|
||||
|
||||
static Map<String, S3ClientSettings> overrideCredentials(Map<String, S3ClientSettings> clientsSettings,
|
||||
BasicAWSCredentials credentials) {
|
||||
final MapBuilder<String, S3ClientSettings> mapBuilder = new MapBuilder<>();
|
||||
for (final Map.Entry<String, S3ClientSettings> entry : clientsSettings.entrySet()) {
|
||||
final S3ClientSettings s3ClientSettings = new S3ClientSettings(credentials, entry.getValue().endpoint,
|
||||
entry.getValue().protocol, entry.getValue().proxyHost, entry.getValue().proxyPort, entry.getValue().proxyUsername,
|
||||
entry.getValue().proxyPassword, entry.getValue().readTimeoutMillis, entry.getValue().maxRetries,
|
||||
entry.getValue().throttleRetries);
|
||||
mapBuilder.put(entry.getKey(), s3ClientSettings);
|
||||
}
|
||||
return mapBuilder.immutableMap();
|
||||
}
|
||||
|
||||
static boolean checkDeprecatedCredentials(Settings repositorySettings) {
|
||||
if (S3Repository.ACCESS_KEY_SETTING.exists(repositorySettings)) {
|
||||
if (S3Repository.SECRET_KEY_SETTING.exists(repositorySettings) == false) {
|
||||
@ -224,25 +211,37 @@ final class S3ClientSettings {
|
||||
|
||||
// pkg private for tests
|
||||
/** Parse settings for a single client. */
|
||||
static S3ClientSettings getClientSettings(Settings settings, String clientName) {
|
||||
static S3ClientSettings getClientSettings(final Settings settings, final String clientName) {
|
||||
final AWSCredentials credentials = S3ClientSettings.loadCredentials(settings, clientName);
|
||||
return getClientSettings(settings, clientName, credentials);
|
||||
}
|
||||
|
||||
static S3ClientSettings getClientSettings(final Settings settings, final String clientName, final AWSCredentials credentials) {
|
||||
try (SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING);
|
||||
SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING)) {
|
||||
return new S3ClientSettings(
|
||||
credentials,
|
||||
getConfigValue(settings, clientName, ENDPOINT_SETTING),
|
||||
getConfigValue(settings, clientName, PROTOCOL_SETTING),
|
||||
getConfigValue(settings, clientName, PROXY_HOST_SETTING),
|
||||
getConfigValue(settings, clientName, PROXY_PORT_SETTING),
|
||||
proxyUsername.toString(),
|
||||
proxyPassword.toString(),
|
||||
(int)getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis(),
|
||||
getConfigValue(settings, clientName, MAX_RETRIES_SETTING),
|
||||
getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING)
|
||||
credentials,
|
||||
getConfigValue(settings, clientName, ENDPOINT_SETTING),
|
||||
getConfigValue(settings, clientName, PROTOCOL_SETTING),
|
||||
getConfigValue(settings, clientName, PROXY_HOST_SETTING),
|
||||
getConfigValue(settings, clientName, PROXY_PORT_SETTING),
|
||||
proxyUsername.toString(),
|
||||
proxyPassword.toString(),
|
||||
Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()),
|
||||
getConfigValue(settings, clientName, MAX_RETRIES_SETTING),
|
||||
getConfigValue(settings, clientName, USE_THROTTLE_RETRIES_SETTING)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
static S3ClientSettings getClientSettings(final RepositoryMetaData metadata, final AWSCredentials credentials) {
final Settings.Builder builder = Settings.builder();
for (final String key : metadata.settings().keySet()) {
builder.put(PREFIX + "provided" + "." + key, metadata.settings().get(key));
}
return getClientSettings(builder.build(), "provided", credentials);
}

private static <T> T getConfigValue(Settings settings, String clientName,
Setting.AffixSetting<T> clientSetting) {
final Setting<T> concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName);
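Note on the hunk above: deprecated repository-level access/secret keys are now folded into a synthetic "provided" client-settings namespace instead of mutating the shared client map. A minimal usage sketch under stated assumptions (the repository name, bucket value, and credential strings below are illustrative placeholders, not taken from the change):

----------------------------------------------------------------------
// Hypothetical sketch of calling the new
// getClientSettings(RepositoryMetaData, AWSCredentials) overload shown above.
RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "s3",
    Settings.builder()
        .put("bucket", "my-bucket")            // placeholder repository setting
        .build());
BasicAWSCredentials insecure =
    new BasicAWSCredentials("insecure_aws_key", "insecure_aws_secret");
// The repository settings are re-keyed under the "provided" client name, so the
// usual affix settings (endpoint, proxy, timeouts) are parsed once more for this
// single repository without touching the named clients in the keystore.
S3ClientSettings perRepository = S3ClientSettings.getClientSettings(metadata, insecure);
----------------------------------------------------------------------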
@ -35,7 +35,6 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
import org.elasticsearch.repositories.RepositoryException;
|
||||
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
|
||||
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
|
||||
/**
|
||||
@ -163,6 +162,8 @@ class S3Repository extends BlobStoreRepository {
|
||||
|
||||
private final String clientName;
|
||||
|
||||
private final AmazonS3Reference reference;
|
||||
|
||||
/**
|
||||
* Constructs an s3 backed repository
|
||||
*/
|
||||
@ -200,21 +201,54 @@ class S3Repository extends BlobStoreRepository {
|
||||
|
||||
this.storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
|
||||
this.cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
|
||||
|
||||
this.clientName = CLIENT_NAME.get(metadata.settings());
|
||||
|
||||
logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " +
|
||||
"buffer_size [{}], cannedACL [{}], storageClass [{}]",
|
||||
bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass);
|
||||
|
||||
// (repository settings)
|
||||
if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
|
||||
overrideCredentialsFromClusterState(service);
|
||||
if (CLIENT_NAME.exists(metadata.settings()) && S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
|
||||
logger.warn(
|
||||
"ignoring use of named client [{}] for repository [{}] as insecure credentials were specified",
|
||||
clientName,
|
||||
metadata.name());
|
||||
}
|
||||
|
||||
if (S3ClientSettings.checkDeprecatedCredentials(metadata.settings())) {
|
||||
// provided repository settings
|
||||
deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead "
|
||||
+ "store these in named clients and the elasticsearch keystore for secure settings.");
|
||||
final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings());
|
||||
final S3ClientSettings s3ClientSettings = S3ClientSettings.getClientSettings(metadata, insecureCredentials);
|
||||
this.reference = new AmazonS3Reference(service.buildClient(s3ClientSettings));
|
||||
} else {
|
||||
reference = null;
|
||||
}
|
||||
|
||||
logger.debug(
|
||||
"using bucket [{}], chunk_size [{}], server_side_encryption [{}], buffer_size [{}], cannedACL [{}], storageClass [{}]",
|
||||
bucket,
|
||||
chunkSize,
|
||||
serverSideEncryption,
|
||||
bufferSize,
|
||||
cannedACL,
|
||||
storageClass);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected S3BlobStore createBlobStore() {
|
||||
return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
|
||||
if (reference != null) {
|
||||
assert S3ClientSettings.checkDeprecatedCredentials(metadata.settings()) : metadata.name();
|
||||
return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass) {
|
||||
@Override
|
||||
public AmazonS3Reference clientReference() {
|
||||
if (reference.tryIncRef()) {
|
||||
return reference;
|
||||
} else {
|
||||
throw new IllegalStateException("S3 client is closed");
|
||||
}
|
||||
}
|
||||
};
|
||||
} else {
|
||||
return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
|
||||
}
|
||||
}
|
||||
|
||||
// only use for testing
|
||||
@ -244,13 +278,13 @@ class S3Repository extends BlobStoreRepository {
|
||||
return chunkSize;
|
||||
}
|
||||
|
||||
void overrideCredentialsFromClusterState(final S3Service s3Service) {
|
||||
deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead "
|
||||
+ "store these in named clients and the elasticsearch keystore for secure settings.");
|
||||
final BasicAWSCredentials insecureCredentials = S3ClientSettings.loadDeprecatedCredentials(metadata.settings());
|
||||
// hack, but that's ok because the whole if branch should be axed
|
||||
final Map<String, S3ClientSettings> prevSettings = s3Service.refreshAndClearCache(S3ClientSettings.load(Settings.EMPTY));
|
||||
final Map<String, S3ClientSettings> newSettings = S3ClientSettings.overrideCredentials(prevSettings, insecureCredentials);
|
||||
s3Service.refreshAndClearCache(newSettings);
|
||||
@Override
|
||||
protected void doClose() {
|
||||
if (reference != null) {
|
||||
assert S3ClientSettings.checkDeprecatedCredentials(metadata.settings()) : metadata.name();
|
||||
reference.decRef();
|
||||
}
|
||||
super.doClose();
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -107,7 +107,6 @@ public class RepositoryCredentialsTests extends ESTestCase {
|
||||
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
|
||||
// repository settings for credentials override node secure settings
|
||||
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
|
||||
.put(S3Repository.CLIENT_NAME.getKey(), randomFrom(clientNames))
|
||||
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key")
|
||||
.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret").build());
|
||||
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);
|
||||
@ -163,11 +162,13 @@ public class RepositoryCredentialsTests extends ESTestCase {
|
||||
secureSettings.setString("s3.client." + clientName + ".secret_key", "secure_aws_secret");
|
||||
final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
|
||||
// repository settings
|
||||
final Settings.Builder builder = Settings.builder().put(S3Repository.CLIENT_NAME.getKey(), clientName);
|
||||
final Settings.Builder builder = Settings.builder();
|
||||
final boolean repositorySettings = randomBoolean();
|
||||
if (repositorySettings) {
|
||||
builder.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "insecure_aws_key");
|
||||
builder.put(S3Repository.SECRET_KEY_SETTING.getKey(), "insecure_aws_secret");
|
||||
} else {
|
||||
builder.put(S3Repository.CLIENT_NAME.getKey(), clientName);
|
||||
}
|
||||
final RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", builder.build());
|
||||
try (S3RepositoryPlugin s3Plugin = new ProxyS3RepositoryPlugin(settings);
|
||||
@ -202,8 +203,13 @@ public class RepositoryCredentialsTests extends ESTestCase {
|
||||
try (AmazonS3Reference s3Ref = ((S3BlobStore) s3repo.blobStore()).clientReference()) {
|
||||
final AWSCredentials newCredentials = ((ProxyS3RepositoryPlugin.ClientAndCredentials) s3Ref.client()).credentials
|
||||
.getCredentials();
|
||||
assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key"));
|
||||
assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret"));
|
||||
if (repositorySettings) {
|
||||
assertThat(newCredentials.getAWSAccessKeyId(), is("insecure_aws_key"));
|
||||
assertThat(newCredentials.getAWSSecretKey(), is("insecure_aws_secret"));
|
||||
} else {
|
||||
assertThat(newCredentials.getAWSAccessKeyId(), is("new_secret_aws_key"));
|
||||
assertThat(newCredentials.getAWSSecretKey(), is("new_secret_aws_secret"));
|
||||
}
|
||||
}
|
||||
}
|
||||
if (repositorySettings) {
|
||||
|
@ -18,9 +18,9 @@
|
||||
*/
|
||||
package org.elasticsearch.repositories.s3;
|
||||
|
||||
import com.amazonaws.services.s3.AmazonS3;
|
||||
import com.amazonaws.services.s3.model.CannedAccessControlList;
|
||||
import com.amazonaws.services.s3.model.StorageClass;
|
||||
|
||||
import org.elasticsearch.client.node.NodeClient;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsFilter;
|
||||
@ -91,7 +91,6 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
|
||||
.setVerify(verify)
|
||||
.setSettings(Settings.builder()
|
||||
.put(S3Repository.BUCKET_SETTING.getKey(), bucket)
|
||||
.put(S3Repository.CLIENT_NAME.getKey(), client)
|
||||
.put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize)
|
||||
.put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption)
|
||||
.put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL)
|
||||
@ -121,14 +120,10 @@ public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCa
|
||||
return Collections.singletonMap(S3Repository.TYPE,
|
||||
(metadata) -> new S3Repository(metadata, env.settings(), registry, new S3Service(env.settings()) {
|
||||
@Override
|
||||
public synchronized AmazonS3Reference client(String clientName) {
|
||||
return new AmazonS3Reference(new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass));
|
||||
AmazonS3 buildClient(S3ClientSettings clientSettings) {
|
||||
return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass);
|
||||
}
|
||||
}) {
|
||||
@Override
|
||||
void overrideCredentialsFromClusterState(S3Service awsService) {
|
||||
}
|
||||
});
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -32,9 +32,9 @@ import io.netty.handler.codec.http.HttpRequestEncoder;
|
||||
import io.netty.handler.codec.http.HttpResponse;
|
||||
import io.netty.handler.codec.http.HttpResponseDecoder;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
@ -86,11 +86,11 @@ class NioHttpClient implements Closeable {
|
||||
return list;
|
||||
}
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(NioHttpClient.class);
|
||||
|
||||
private final NioGroup nioGroup;
|
||||
private final Logger logger;
|
||||
|
||||
NioHttpClient() {
|
||||
logger = Loggers.getLogger(NioHttpClient.class, Settings.EMPTY);
|
||||
try {
|
||||
nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "nio-http-client"), 1,
|
||||
(s) -> new EventHandler(this::onException, s));
|
||||
|
@ -62,7 +62,7 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase {
|
||||
new NoneCircuitBreakerService()) {
|
||||
|
||||
@Override
|
||||
protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException,
|
||||
public Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException,
|
||||
InterruptedException {
|
||||
if (doHandshake) {
|
||||
return super.executeHandshake(node, channel, timeout);
|
||||
|
@ -340,26 +340,22 @@ public class EvilLoggerTests extends ESTestCase {
|
||||
}
|
||||
|
||||
public void testProperties() throws IOException, UserException {
|
||||
final Settings.Builder builder = Settings.builder().put("cluster.name", randomAlphaOfLength(16));
|
||||
if (randomBoolean()) {
|
||||
builder.put("node.name", randomAlphaOfLength(16));
|
||||
}
|
||||
final Settings settings = builder.build();
|
||||
final Settings settings = Settings.builder()
|
||||
.put("cluster.name", randomAlphaOfLength(16))
|
||||
.put("node.name", randomAlphaOfLength(16))
|
||||
.build();
|
||||
setupLogging("minimal", settings);
|
||||
|
||||
assertNotNull(System.getProperty("es.logs.base_path"));
|
||||
|
||||
assertThat(System.getProperty("es.logs.cluster_name"), equalTo(ClusterName.CLUSTER_NAME_SETTING.get(settings).value()));
|
||||
if (Node.NODE_NAME_SETTING.exists(settings)) {
|
||||
assertThat(System.getProperty("es.logs.node_name"), equalTo(Node.NODE_NAME_SETTING.get(settings)));
|
||||
} else {
|
||||
assertNull(System.getProperty("es.logs.node_name"));
|
||||
}
|
||||
assertThat(System.getProperty("es.logs.node_name"), equalTo(Node.NODE_NAME_SETTING.get(settings)));
|
||||
}
|
||||
|
||||
public void testNoNodeNameInPatternWarning() throws IOException, UserException {
|
||||
String nodeName = randomAlphaOfLength(16);
|
||||
LogConfigurator.setNodeName(nodeName);
|
||||
setupLogging("no_node_name");
|
||||
|
||||
final String path =
|
||||
System.getProperty("es.logs.base_path") +
|
||||
System.getProperty("file.separator") +
|
||||
@ -368,10 +364,10 @@ public class EvilLoggerTests extends ESTestCase {
|
||||
assertThat(events.size(), equalTo(2));
|
||||
final String location = "org.elasticsearch.common.logging.LogConfigurator";
|
||||
// the first message is a warning for unsupported configuration files
|
||||
assertLogLine(events.get(0), Level.WARN, location, "\\[unknown\\] Some logging configurations have %marker but don't "
|
||||
+ "have %node_name. We will automatically add %node_name to the pattern to ease the migration for users "
|
||||
+ "who customize log4j2.properties but will stop this behavior in 7.0. You should manually replace "
|
||||
+ "`%node_name` with `\\[%node_name\\]%marker ` in these locations:");
|
||||
assertLogLine(events.get(0), Level.WARN, location, "\\[" + nodeName + "\\] Some logging configurations have "
|
||||
+ "%marker but don't have %node_name. We will automatically add %node_name to the pattern to ease the "
|
||||
+ "migration for users who customize log4j2.properties but will stop this behavior in 7.0. You should "
|
||||
+ "manually replace `%node_name` with `\\[%node_name\\]%marker ` in these locations:");
|
||||
if (Constants.WINDOWS) {
|
||||
assertThat(events.get(1), endsWith("no_node_name\\log4j2.properties"));
|
||||
} else {
|
||||
|
@ -52,7 +52,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
IOException ioException = expectThrows(IOException.class, () -> {
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
});
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith(path.toString()));
}
@ -72,7 +72,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
|
||||
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
|
||||
IOException ioException = expectThrows(IOException.class, () -> {
|
||||
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
|
||||
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
|
||||
});
|
||||
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory"));
|
||||
}
|
||||
@ -97,7 +97,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
|
||||
.putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build();
|
||||
IOException ioException = expectThrows(IOException.class, () -> {
|
||||
new NodeEnvironment(build, TestEnvironment.newEnvironment(build), nodeId -> {});
|
||||
new NodeEnvironment(build, TestEnvironment.newEnvironment(build));
|
||||
});
|
||||
assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory"));
|
||||
}
|
||||
|
@ -111,7 +111,6 @@ public class RecoveryIT extends AbstractRollingTestCase {
|
||||
return future;
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/33616")
|
||||
public void testRecoveryWithConcurrentIndexing() throws Exception {
|
||||
final String index = "recovery_with_concurrent_indexing";
|
||||
Response response = client().performRequest(new Request("GET", "_nodes"));
|
||||
@ -184,7 +183,6 @@ public class RecoveryIT extends AbstractRollingTestCase {
|
||||
}
|
||||
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/33616")
|
||||
public void testRelocationWithConcurrentIndexing() throws Exception {
|
||||
final String index = "relocation_with_concurrent_indexing";
|
||||
switch (CLUSTER_TYPE) {
|
||||
|
@ -19,8 +19,8 @@
|
||||
|
||||
package org.elasticsearch.unconfigured_node_name;
|
||||
|
||||
import org.elasticsearch.bootstrap.BootstrapInfo;
|
||||
import org.elasticsearch.common.logging.NodeNameInLogsIntegTestCase;
|
||||
import org.hamcrest.Matcher;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.BufferedReader;
|
||||
@ -30,11 +30,16 @@ import java.nio.file.Path;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
|
||||
import static org.hamcrest.Matchers.not;
|
||||
|
||||
public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
|
||||
@Override
|
||||
protected BufferedReader openReader(Path logFile) throws IOException {
|
||||
assumeTrue("We log a line without the node name if we can't install the seccomp filters",
|
||||
BootstrapInfo.isSystemCallFilterInstalled());
|
||||
protected Matcher<String> nodeNameMatcher() {
|
||||
return not("");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BufferedReader openReader(Path logFile) {
|
||||
return AccessController.doPrivileged((PrivilegedAction<BufferedReader>) () -> {
|
||||
try {
|
||||
return Files.newBufferedReader(logFile, StandardCharsets.UTF_8);
|
||||
@ -43,11 +48,4 @@ public class NodeNameInLogsIT extends NodeNameInLogsIntegTestCase {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public void testDummy() {
|
||||
/* Dummy test case so that when we run this test on a platform that
|
||||
* does not support our syscall filters and we skip the test above
|
||||
* we don't fail the entire test run because we skipped all the tests.
|
||||
*/
|
||||
}
|
||||
}
|
||||
|
@ -200,7 +200,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
|
||||
} catch (QueryShardException|ParsingException e) {
|
||||
valid = false;
|
||||
error = e.getDetailedMessage();
|
||||
} catch (AssertionError|IOException e) {
|
||||
} catch (AssertionError e) {
|
||||
valid = false;
|
||||
error = e.getMessage();
|
||||
} finally {
|
||||
@ -210,7 +210,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
|
||||
return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
|
||||
}
|
||||
|
||||
private String explain(SearchContext context, boolean rewritten) throws IOException {
|
||||
private String explain(SearchContext context, boolean rewritten) {
|
||||
Query query = context.query();
|
||||
if (rewritten && query instanceof MatchNoDocsQuery) {
|
||||
return context.parsedQuery().query().toString();
|
||||
|
@ -22,7 +22,6 @@ import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.threadpool.Scheduler;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
@ -80,7 +79,7 @@ public final class BulkRequestHandler {
|
||||
latch.countDown();
|
||||
}
|
||||
}
|
||||
}, Settings.EMPTY);
|
||||
});
|
||||
bulkRequestSetupSuccessful = true;
|
||||
if (concurrentRequests == 0) {
|
||||
latch.await();
|
||||
|
@ -19,10 +19,9 @@
|
||||
package org.elasticsearch.action.bulk;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.FutureUtils;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
@ -54,11 +53,10 @@ public class Retry {
* @param consumer The consumer to which apply the request and listener
* @param bulkRequest The bulk request that should be executed.
* @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not
* @param settings settings
*/
public void withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer, BulkRequest bulkRequest,
ActionListener<BulkResponse> listener, Settings settings) {
RetryHandler r = new RetryHandler(backoffPolicy, consumer, listener, settings, scheduler);
ActionListener<BulkResponse> listener) {
RetryHandler r = new RetryHandler(backoffPolicy, consumer, listener, scheduler);
r.execute(bulkRequest);
}
@ -68,20 +66,19 @@ public class Retry {
*
* @param consumer The consumer to which apply the request and listener
* @param bulkRequest The bulk request that should be executed.
* @param settings settings
* @return a future representing the bulk response returned by the client.
*/
public PlainActionFuture<BulkResponse> withBackoff(BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
BulkRequest bulkRequest, Settings settings) {
BulkRequest bulkRequest) {
PlainActionFuture<BulkResponse> future = PlainActionFuture.newFuture();
withBackoff(consumer, bulkRequest, future, settings);
withBackoff(consumer, bulkRequest, future);
return future;
}

static class RetryHandler implements ActionListener<BulkResponse> {
private static final RestStatus RETRY_STATUS = RestStatus.TOO_MANY_REQUESTS;
private static final Logger logger = LogManager.getLogger(RetryHandler.class);

private final Logger logger;
private final Scheduler scheduler;
private final BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer;
private final ActionListener<BulkResponse> listener;
@ -95,11 +92,10 @@ public class Retry {
private volatile ScheduledFuture<?> scheduledRequestFuture;

RetryHandler(BackoffPolicy backoffPolicy, BiConsumer<BulkRequest, ActionListener<BulkResponse>> consumer,
ActionListener<BulkResponse> listener, Settings settings, Scheduler scheduler) {
ActionListener<BulkResponse> listener, Scheduler scheduler) {
this.backoff = backoffPolicy.iterator();
this.consumer = consumer;
this.listener = listener;
this.logger = Loggers.getLogger(getClass(), settings);
this.scheduler = scheduler;
// in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood
this.startTimestampNanos = System.nanoTime();
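Note on the Retry hunks above: the Settings parameter is dropped now that the handler resolves its logger statically via LogManager. A short, hedged sketch of the slimmed-down API, mirroring the RetryTests call site earlier in this diff ("client", "bulkRequest" and "logger" are assumed to be in scope, not part of the change):

----------------------------------------------------------------------
// Sketch only: retry a bulk request with exponential backoff using the
// two-argument withBackoff variant introduced above.
Retry retry = new Retry(BackoffPolicy.exponentialBackoff(), client.threadPool());

// Synchronous style: the returned PlainActionFuture blocks in actionGet().
BulkResponse response = retry.withBackoff(client::bulk, bulkRequest).actionGet();

// Listener style: no Settings argument is threaded through any more.
retry.withBackoff(client::bulk, bulkRequest, ActionListener.wrap(
    r -> logger.info("bulk finished, failures={}", r.hasFailures()),
    e -> logger.warn("bulk failed", e)));
----------------------------------------------------------------------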
@ -152,4 +152,11 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
|
||||
clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference()
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getExecutor(ExplainRequest request, ShardId shardId) {
|
||||
IndexService indexService = searchService.getIndicesService().indexServiceSafe(shardId.getIndex());
|
||||
return indexService.getIndexSettings().isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : super.getExecutor(request,
|
||||
shardId);
|
||||
}
|
||||
}
|
||||
|
@ -70,9 +70,8 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
|
||||
request.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
|
||||
final OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
|
||||
final String[] concreteIndices;
|
||||
if (remoteClusterIndices.isEmpty() == false && localIndices.indices().length == 0) {
|
||||
// in the case we have one or more remote indices but no local we don't expand to all local indices and just do remote
|
||||
// indices
|
||||
if (localIndices == null) {
|
||||
// in the case we have one or more remote indices but no local we don't expand to all local indices and just do remote indices
|
||||
concreteIndices = Strings.EMPTY_ARRAY;
|
||||
} else {
|
||||
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, localIndices);
|
||||
|
@ -111,4 +111,11 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
|
||||
protected GetResponse newResponse() {
|
||||
return new GetResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String getExecutor(GetRequest request, ShardId shardId) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
||||
return indexService.getIndexSettings().isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : super.getExecutor(request,
|
||||
shardId);
|
||||
}
|
||||
}
|
||||
|
@ -102,4 +102,11 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul

return response;
}

@Override
protected String getExecutor(MultiGetShardRequest request, ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
return indexService.getIndexSettings().isSearchThrottled() ? ThreadPool.Names.SEARCH_THROTTLED : super.getExecutor(request,
shardId);
}
}
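Note on the three single-shard actions above (explain, get, multi-get): each now picks its executor per request, falling back to the dedicated throttled search pool when the target index is search-throttled. A condensed sketch of the shared pattern, with the enclosing TransportSingleShardAction subclass and its indicesService field assumed:

----------------------------------------------------------------------
// Sketch of the executor-selection override added in the hunks above;
// only the override itself is shown.
@Override
protected String getExecutor(GetRequest request, ShardId shardId) {
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    // Throttled indices are served from the SEARCH_THROTTLED pool,
    // everything else keeps the action's default executor.
    return indexService.getIndexSettings().isSearchThrottled()
        ? ThreadPool.Names.SEARCH_THROTTLED
        : super.getExecutor(request, shardId);
}
----------------------------------------------------------------------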
@ -24,12 +24,16 @@ import org.elasticsearch.action.ActionRunnable;
|
||||
import org.elasticsearch.ingest.IngestDocument;
|
||||
import org.elasticsearch.ingest.Pipeline;
|
||||
import org.elasticsearch.ingest.CompoundProcessor;
|
||||
import org.elasticsearch.ingest.PipelineProcessor;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.action.ingest.TrackingResultProcessor.decorate;
|
||||
import static org.elasticsearch.ingest.TrackingResultProcessor.decorate;
|
||||
|
||||
class SimulateExecutionService {
|
||||
|
||||
@ -42,11 +46,15 @@ class SimulateExecutionService {
|
||||
}
|
||||
|
||||
SimulateDocumentResult executeDocument(Pipeline pipeline, IngestDocument ingestDocument, boolean verbose) {
|
||||
// Prevent cycles in pipeline decoration
|
||||
final Set<PipelineProcessor> pipelinesSeen = Collections.newSetFromMap(new IdentityHashMap<>());
|
||||
if (verbose) {
|
||||
List<SimulateProcessorResult> processorResultList = new ArrayList<>();
|
||||
CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList);
|
||||
CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), processorResultList, pipelinesSeen);
|
||||
try {
|
||||
verbosePipelineProcessor.execute(ingestDocument);
|
||||
Pipeline verbosePipeline = new Pipeline(pipeline.getId(), pipeline.getDescription(), pipeline.getVersion(),
|
||||
verbosePipelineProcessor);
|
||||
ingestDocument.executePipeline(verbosePipeline);
|
||||
return new SimulateDocumentVerboseResult(processorResultList);
|
||||
} catch (Exception e) {
|
||||
return new SimulateDocumentVerboseResult(processorResultList);
|
||||
|
@ -19,6 +19,7 @@
|
||||
package org.elasticsearch.action.resync;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
@ -28,6 +29,7 @@ import org.elasticsearch.index.translog.Translog;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Represents a batch of operations sent from the primary to its replicas during the primary-replica resync.
|
||||
@ -36,15 +38,17 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest<Resyn
|
||||
|
||||
private long trimAboveSeqNo;
|
||||
private Translog.Operation[] operations;
|
||||
private long maxSeenAutoIdTimestampOnPrimary;
|
||||
|
||||
ResyncReplicationRequest() {
|
||||
super();
|
||||
}
|
||||
|
||||
public ResyncReplicationRequest(final ShardId shardId, final long trimAboveSeqNo,
|
||||
final Translog.Operation[] operations) {
|
||||
public ResyncReplicationRequest(final ShardId shardId, final long trimAboveSeqNo, final long maxSeenAutoIdTimestampOnPrimary,
|
||||
final Translog.Operation[]operations) {
|
||||
super(shardId);
|
||||
this.trimAboveSeqNo = trimAboveSeqNo;
|
||||
this.maxSeenAutoIdTimestampOnPrimary = maxSeenAutoIdTimestampOnPrimary;
|
||||
this.operations = operations;
|
||||
}
|
||||
|
||||
@ -52,6 +56,10 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest<Resyn
|
||||
return trimAboveSeqNo;
|
||||
}
|
||||
|
||||
public long getMaxSeenAutoIdTimestampOnPrimary() {
|
||||
return maxSeenAutoIdTimestampOnPrimary;
|
||||
}
|
||||
|
||||
public Translog.Operation[] getOperations() {
|
||||
return operations;
|
||||
}
|
||||
@ -73,6 +81,11 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest<Resyn
|
||||
} else {
|
||||
trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
|
||||
maxSeenAutoIdTimestampOnPrimary = in.readZLong();
|
||||
} else {
|
||||
maxSeenAutoIdTimestampOnPrimary = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
|
||||
}
|
||||
operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new);
|
||||
}
|
||||
|
||||
@ -82,6 +95,9 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest<Resyn
|
||||
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
|
||||
out.writeZLong(trimAboveSeqNo);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
|
||||
out.writeZLong(maxSeenAutoIdTimestampOnPrimary);
|
||||
}
|
||||
out.writeArray(Translog.Operation::writeOperation, operations);
|
||||
}
|
||||
|
||||
@ -90,13 +106,13 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest<Resyn
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
final ResyncReplicationRequest that = (ResyncReplicationRequest) o;
|
||||
return trimAboveSeqNo == that.trimAboveSeqNo
|
||||
return trimAboveSeqNo == that.trimAboveSeqNo && maxSeenAutoIdTimestampOnPrimary == that.maxSeenAutoIdTimestampOnPrimary
|
||||
&& Arrays.equals(operations, that.operations);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Long.hashCode(trimAboveSeqNo) + 31 * Arrays.hashCode(operations);
|
||||
return Objects.hash(trimAboveSeqNo, maxSeenAutoIdTimestampOnPrimary, operations);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -106,6 +122,7 @@ public final class ResyncReplicationRequest extends ReplicatedWriteRequest<Resyn
|
||||
", timeout=" + timeout +
|
||||
", index='" + index + '\'' +
|
||||
", trimAboveSeqNo=" + trimAboveSeqNo +
|
||||
", maxSeenAutoIdTimestampOnPrimary=" + maxSeenAutoIdTimestampOnPrimary +
|
||||
", ops=" + operations.length +
|
||||
"}";
|
||||
}
|
||||
|
@ -119,6 +119,12 @@ public class TransportResyncReplicationAction extends TransportWriteAction<Resyn
|
||||
|
||||
public static Translog.Location performOnReplica(ResyncReplicationRequest request, IndexShard replica) throws Exception {
|
||||
Translog.Location location = null;
|
||||
/*
|
||||
* Operations received from resync do not have auto_id_timestamp individually, we need to bootstrap this max_seen_timestamp
|
||||
* (at least the highest timestamp from any of these operations) to make sure that we will disable optimization for the same
|
||||
* append-only requests with timestamp (sources of these operations) that are replicated; otherwise we may have duplicates.
|
||||
*/
|
||||
replica.updateMaxUnsafeAutoIdTimestamp(request.getMaxSeenAutoIdTimestampOnPrimary());
|
||||
for (Translog.Operation operation : request.getOperations()) {
|
||||
final Engine.Result operationResult = replica.applyTranslogOperation(operation, Engine.Operation.Origin.REPLICA);
|
||||
if (operationResult.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
|
||||
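Note on the ResyncReplicationRequest changes above: the new maxSeenAutoIdTimestampOnPrimary field is guarded behind the 6.5.0 wire version so mixed-version clusters keep working. A condensed sketch of that version-gated read/write pattern, assuming the field and stream variables from the hunks (the existing trimAboveSeqNo handling is omitted here):

----------------------------------------------------------------------
// Sketch of the version-gated wire format used above: newer nodes write the
// extra field, older readers fall back to the unset sentinel.
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
        out.writeZLong(maxSeenAutoIdTimestampOnPrimary);
    }
    out.writeArray(Translog.Operation::writeOperation, operations);
}

@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    if (in.getVersion().onOrAfter(Version.V_6_5_0)) {
        maxSeenAutoIdTimestampOnPrimary = in.readZLong();
    } else {
        maxSeenAutoIdTimestampOnPrimary = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
    }
    operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new);
}
----------------------------------------------------------------------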
|
@ -23,6 +23,7 @@ import org.apache.lucene.util.FixedBitSet;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.routing.GroupShardsIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.search.internal.AliasFilter;
|
||||
import org.elasticsearch.transport.Transport;
|
||||
|
||||
@ -40,7 +41,7 @@ import java.util.stream.Stream;
|
||||
* which allows to fan out to more shards at the same time without running into rejections even if we are hitting a
|
||||
* large portion of the clusters indices.
|
||||
*/
|
||||
final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<SearchTransportService.CanMatchResponse> {
|
||||
final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<SearchService.CanMatchResponse> {
|
||||
|
||||
private final Function<GroupShardsIterator<SearchShardIterator>, SearchPhase> phaseFactory;
|
||||
private final GroupShardsIterator<SearchShardIterator> shardsIts;
|
||||
@ -67,13 +68,13 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
|
||||
|
||||
@Override
|
||||
protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard,
|
||||
SearchActionListener<SearchTransportService.CanMatchResponse> listener) {
|
||||
SearchActionListener<SearchService.CanMatchResponse> listener) {
|
||||
getSearchTransport().sendCanMatch(getConnection(shardIt.getClusterAlias(), shard.currentNodeId()),
|
||||
buildShardSearchRequest(shardIt), getTask(), listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected SearchPhase getNextPhase(SearchPhaseResults<SearchTransportService.CanMatchResponse> results,
|
||||
protected SearchPhase getNextPhase(SearchPhaseResults<SearchService.CanMatchResponse> results,
|
||||
SearchPhaseContext context) {
|
||||
|
||||
return phaseFactory.apply(getIterator((BitSetSearchPhaseResults) results, shardsIts));
|
||||
@ -100,7 +101,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
|
||||
}
|
||||
|
||||
private static final class BitSetSearchPhaseResults extends InitialSearchPhase.
|
||||
SearchPhaseResults<SearchTransportService.CanMatchResponse> {
|
||||
SearchPhaseResults<SearchService.CanMatchResponse> {
|
||||
|
||||
private final FixedBitSet possibleMatches;
|
||||
private int numPossibleMatches;
|
||||
@ -111,7 +112,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
|
||||
}
|
||||
|
||||
@Override
|
||||
void consumeResult(SearchTransportService.CanMatchResponse result) {
|
||||
void consumeResult(SearchService.CanMatchResponse result) {
|
||||
if (result.canMatch()) {
|
||||
consumeShardFailure(result.getShardIndex());
|
||||
}
|
||||
@ -139,7 +140,7 @@ final class CanMatchPreFilterSearchPhase extends AbstractSearchAsyncAction<Searc
|
||||
}
|
||||
|
||||
@Override
|
||||
Stream<SearchTransportService.CanMatchResponse> getSuccessfulResults() {
|
||||
Stream<SearchService.CanMatchResponse> getSuccessfulResults() {
|
||||
return Stream.empty();
|
||||
}
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionListenerResponseHandler;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.action.support.HandledTransportAction.ChannelActionListener;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
@ -112,9 +112,9 @@ public class SearchTransportService extends AbstractComponent {
|
||||
}
|
||||
|
||||
public void sendCanMatch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final
|
||||
ActionListener<CanMatchResponse> listener) {
|
||||
ActionListener<SearchService.CanMatchResponse> listener) {
|
||||
transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task,
|
||||
TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new));
|
||||
TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, SearchService.CanMatchResponse::new));
|
||||
}
|
||||
|
||||
public void sendClearAllScrollContexts(Transport.Connection connection, final ActionListener<TransportResponse> listener) {
|
||||
@ -349,83 +349,54 @@ public class SearchTransportService extends AbstractComponent {
|
||||
|
||||
transportService.registerRequestHandler(QUERY_ACTION_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new,
|
||||
(request, channel, task) -> {
|
||||
searchService.executeQueryPhase(request, (SearchTask) task, new HandledTransportAction.ChannelActionListener<>(
|
||||
searchService.executeQueryPhase(request, (SearchTask) task, new ChannelActionListener<>(
|
||||
channel, QUERY_ACTION_NAME, request));
|
||||
});
|
||||
TransportActionProxy.registerProxyAction(transportService, QUERY_ACTION_NAME,
|
||||
(request) -> ((ShardSearchRequest)request).numberOfShards() == 1 ? QueryFetchSearchResult::new : QuerySearchResult::new);
|
||||
|
||||
transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, ThreadPool.Names.SEARCH, QuerySearchRequest::new,
|
||||
transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, ThreadPool.Names.SAME, QuerySearchRequest::new,
|
||||
(request, channel, task) -> {
|
||||
QuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task);
|
||||
channel.sendResponse(result);
|
||||
searchService.executeQueryPhase(request, (SearchTask)task, new ChannelActionListener<>(channel, QUERY_ID_ACTION_NAME,
|
||||
request));
|
||||
});
|
||||
TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, QuerySearchResult::new);
|
||||
|
||||
transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, InternalScrollSearchRequest::new,
|
||||
+        transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, ThreadPool.Names.SAME, InternalScrollSearchRequest::new,
             (request, channel, task) -> {
-                ScrollQuerySearchResult result = searchService.executeQueryPhase(request, (SearchTask)task);
-                channel.sendResponse(result);
+                searchService.executeQueryPhase(request, (SearchTask)task, new ChannelActionListener<>(channel, QUERY_SCROLL_ACTION_NAME,
+                    request));
             });
         TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, ScrollQuerySearchResult::new);

-        transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, InternalScrollSearchRequest::new,
+        transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, ThreadPool.Names.SAME, InternalScrollSearchRequest::new,
             (request, channel, task) -> {
-                ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task);
-                channel.sendResponse(result);
+                searchService.executeFetchPhase(request, (SearchTask)task, new ChannelActionListener<>(channel,
+                    QUERY_FETCH_SCROLL_ACTION_NAME, request));
             });
         TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, ScrollQueryFetchSearchResult::new);

-        transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ThreadPool.Names.SEARCH, ShardFetchRequest::new,
+        transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ThreadPool.Names.SAME, ShardFetchRequest::new,
             (request, channel, task) -> {
-                FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task);
-                channel.sendResponse(result);
+                searchService.executeFetchPhase(request, (SearchTask)task, new ChannelActionListener<>(channel,
+                    FETCH_ID_SCROLL_ACTION_NAME, request));
             });
         TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, FetchSearchResult::new);

-        transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ThreadPool.Names.SEARCH, true, true, ShardFetchSearchRequest::new,
+        transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ThreadPool.Names.SAME, true, true, ShardFetchSearchRequest::new,
             (request, channel, task) -> {
-                FetchSearchResult result = searchService.executeFetchPhase(request, (SearchTask)task);
-                channel.sendResponse(result);
+                searchService.executeFetchPhase(request, (SearchTask)task, new ChannelActionListener<>(channel, FETCH_ID_ACTION_NAME,
+                    request));
             });
         TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, FetchSearchResult::new);

         // this is cheap, it does not fetch during the rewrite phase, so we can let it quickly execute on a networking thread
         transportService.registerRequestHandler(QUERY_CAN_MATCH_NAME, ThreadPool.Names.SAME, ShardSearchTransportRequest::new,
             (request, channel, task) -> {
-                boolean canMatch = searchService.canMatch(request);
-                channel.sendResponse(new CanMatchResponse(canMatch));
+                searchService.canMatch(request, new ChannelActionListener<>(channel, QUERY_CAN_MATCH_NAME, request));
             });
         TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NAME,
-            (Supplier<TransportResponse>) CanMatchResponse::new);
-    }
-
-    public static final class CanMatchResponse extends SearchPhaseResult {
-        private boolean canMatch;
-
-        public CanMatchResponse() {
-        }
-
-        public CanMatchResponse(boolean canMatch) {
-            this.canMatch = canMatch;
-        }
-
-
-        @Override
-        public void readFrom(StreamInput in) throws IOException {
-            super.readFrom(in);
-            canMatch = in.readBoolean();
-        }
-
-        @Override
-        public void writeTo(StreamOutput out) throws IOException {
-            super.writeTo(out);
-            out.writeBoolean(canMatch);
-        }
-
-        public boolean canMatch() {
-            return canMatch;
-        }
+            (Supplier<TransportResponse>) SearchService.CanMatchResponse::new);
     }

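Every hunk above follows the same conversion: instead of computing the result on the handler thread and calling channel.sendResponse(result) inline, the handler is registered on the SAME (network) thread pool and hands the channel, wrapped in a ChannelActionListener, to the SearchService, which completes the response from whichever thread finishes the work. Below is a minimal, self-contained sketch of that wrapping pattern; TransportChannel, ActionListener and ChannelActionListener here are simplified stand-ins rather than the real Elasticsearch classes, and the CompletableFuture merely stands in for the service-side fork.

--------------------------------------------------
import java.util.concurrent.CompletableFuture;

// Simplified stand-ins for the transport types used in the diff above.
interface TransportChannel {
    void sendResponse(Object response) throws Exception;
    void sendResponse(Exception error) throws Exception;
}

interface ActionListener<T> {
    void onResponse(T result);
    void onFailure(Exception e);
}

// Wraps a channel in a listener so asynchronous code can complete the response later.
final class ChannelActionListener<T> implements ActionListener<T> {
    private final TransportChannel channel;

    ChannelActionListener(TransportChannel channel) {
        this.channel = channel;
    }

    @Override
    public void onResponse(T result) {
        try {
            channel.sendResponse(result);
        } catch (Exception e) {
            onFailure(e);
        }
    }

    @Override
    public void onFailure(Exception e) {
        try {
            channel.sendResponse(e);
        } catch (Exception inner) {
            inner.addSuppressed(e);
            inner.printStackTrace(); // a real implementation would log this instead
        }
    }
}

// Old style: block the handler thread, then reply.
//   String result = service.executeQuery(request);
//   channel.sendResponse(result);
// New style: stay on the network thread and let the forked work reply via the listener.
final class Demo {
    static CompletableFuture<String> executeQueryAsync(String request) {
        return CompletableFuture.supplyAsync(() -> "result for " + request);
    }

    public static void main(String[] args) throws Exception {
        TransportChannel channel = new TransportChannel() {
            @Override public void sendResponse(Object response) { System.out.println("response: " + response); }
            @Override public void sendResponse(Exception error) { System.out.println("error: " + error); }
        };
        ActionListener<String> listener = new ChannelActionListener<>(channel);
        executeQueryAsync("query").whenComplete((result, error) -> {
            if (error != null) {
                listener.onFailure(new Exception(error));
            } else {
                listener.onResponse(result);
            }
        });
        Thread.sleep(200); // crude wait for the async task in this sketch
    }
}
--------------------------------------------------
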
@@ -25,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
 import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -193,7 +194,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
             searchRequest.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
         OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
         if (remoteClusterIndices.isEmpty()) {
-            executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteClusterIndices, Collections.emptyList(),
+            executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(),
                 (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener, SearchResponse.Clusters.EMPTY);
         } else {
             remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(),
@@ -203,7 +204,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
                 BiFunction<String, String, DiscoveryNode> clusterNodeLookup = processRemoteShards(searchShardsResponses,
                     remoteClusterIndices, remoteShardIterators, remoteAliasFilters);
                 SearchResponse.Clusters clusters = buildClusters(localIndices, remoteClusterIndices, searchShardsResponses);
-                executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, remoteClusterIndices,
+                executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices,
                     remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener,
                     clusters);
             }, listener::onFailure));
@@ -219,7 +220,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,

     static SearchResponse.Clusters buildClusters(OriginalIndices localIndices, Map<String, OriginalIndices> remoteIndices,
                                                  Map<String, ClusterSearchShardsResponse> searchShardsResponses) {
-        int localClusters = Math.min(localIndices.indices().length, 1);
+        int localClusters = localIndices == null ? 0 : 1;
         int totalClusters = remoteIndices.size() + localClusters;
         int successfulClusters = localClusters;
         for (ClusterSearchShardsResponse searchShardsResponse : searchShardsResponses.values()) {
@@ -277,8 +278,19 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
         };
     }

-    private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest, OriginalIndices localIndices,
-                               Map<String, OriginalIndices> remoteClusterIndices, List<SearchShardIterator> remoteShardIterators,
+    private Index[] resolveLocalIndices(OriginalIndices localIndices,
+                                        IndicesOptions indicesOptions,
+                                        ClusterState clusterState,
+                                        SearchTimeProvider timeProvider) {
+        if (localIndices == null) {
+            return Index.EMPTY_ARRAY; //don't search on any local index (happens when only remote indices were specified)
+        }
+        return indexNameExpressionResolver.concreteIndices(clusterState, indicesOptions,
+            timeProvider.getAbsoluteStartMillis(), localIndices.indices());
+    }
+
+    private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest,
+                               OriginalIndices localIndices, List<SearchShardIterator> remoteShardIterators,
                                BiFunction<String, String, DiscoveryNode> remoteConnections, ClusterState clusterState,
                                Map<String, AliasFilter> remoteAliasMap, ActionListener<SearchResponse> listener,
                                SearchResponse.Clusters clusters) {
@@ -287,13 +299,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
         // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
         // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
         // of just for the _search api
-        final Index[] indices;
-        if (localIndices.indices().length == 0 && remoteClusterIndices.isEmpty() == false) {
-            indices = Index.EMPTY_ARRAY; // don't search on _all if only remote indices were specified
-        } else {
-            indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(),
-                timeProvider.getAbsoluteStartMillis(), localIndices.indices());
-        }
+        final Index[] indices = resolveLocalIndices(localIndices, searchRequest.indicesOptions(), clusterState, timeProvider);
         Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
         Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(),
             searchRequest.indices());

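The TransportSearchAction hunks drop the now-redundant remoteClusterIndices parameter and pull local-index resolution into resolveLocalIndices, using localIndices == null as the signal that only remote indices were requested. The sketch below models that convention with plain collections; LOCAL_CLUSTER_GROUP_KEY, resolveLocalIndices and localClusterCount here are simplified stand-ins for the Elasticsearch equivalents, not the real implementations.

--------------------------------------------------
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Simplified model of the change above: the "local" group is pulled out of the
// per-cluster map, and its absence (null) now signals a remote-only search,
// replacing the old length/empty checks.
final class LocalIndicesResolution {
    static final String LOCAL_CLUSTER_GROUP_KEY = ""; // stand-in for RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY

    static String[] resolveLocalIndices(List<String> localIndices) {
        if (localIndices == null) {
            return new String[0]; // only remote indices were specified; search nothing locally
        }
        // a real resolver would also expand wildcards, aliases and date math here
        return localIndices.toArray(new String[0]);
    }

    static int localClusterCount(List<String> localIndices) {
        return localIndices == null ? 0 : 1; // mirrors buildClusters() in the hunk above
    }

    public static void main(String[] args) {
        Map<String, List<String>> groupedIndices = new HashMap<>();
        groupedIndices.put("remote_a", List.of("logs-*"));
        // no local entry at all, so remove() returns null
        List<String> localIndices = groupedIndices.remove(LOCAL_CLUSTER_GROUP_KEY);

        System.out.println("local indices: " + resolveLocalIndices(localIndices).length); // 0
        System.out.println("total clusters: " + (groupedIndices.size() + localClusterCount(localIndices))); // 1
    }
}
--------------------------------------------------
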
@@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportChannel;
@@ -57,6 +58,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
     protected final IndexNameExpressionResolver indexNameExpressionResolver;

     final String transportShardAction;
+    private final String shardExecutor;

     protected TransportBroadcastAction(Settings settings, String actionName, ClusterService clusterService,
                                        TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
@@ -66,8 +68,9 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
         this.transportService = transportService;
         this.indexNameExpressionResolver = indexNameExpressionResolver;
         this.transportShardAction = actionName + "[s]";
+        this.shardExecutor = shardExecutor;

-        transportService.registerRequestHandler(transportShardAction, shardRequest, shardExecutor, new ShardTransportHandler());
+        transportService.registerRequestHandler(transportShardAction, shardRequest, ThreadPool.Names.SAME, new ShardTransportHandler());
     }

     @Override
@@ -276,7 +279,45 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<

         @Override
         public void messageReceived(ShardRequest request, TransportChannel channel, Task task) throws Exception {
-            channel.sendResponse(shardOperation(request, task));
+            asyncShardOperation(request, task, new ActionListener<ShardResponse>() {
+                @Override
+                public void onResponse(ShardResponse response) {
+                    try {
+                        channel.sendResponse(response);
+                    } catch (Exception e) {
+                        onFailure(e);
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    try {
+                        channel.sendResponse(e);
+                    } catch (Exception e1) {
+                        logger.warn(() -> new ParameterizedMessage(
+                            "Failed to send error response for action [{}] and request [{}]", actionName, request), e1);
+                    }
+                }
+            });
         }
     }
+
+    protected void asyncShardOperation(ShardRequest request, Task task, ActionListener<ShardResponse> listener) {
+        transportService.getThreadPool().executor(getExecutor(request)).execute(new AbstractRunnable() {
+            @Override
+            public void onFailure(Exception e) {
+                listener.onFailure(e);
+            }
+
+            @Override
+            protected void doRun() throws Exception {
+                listener.onResponse(shardOperation(request, task));
+            }
+        });
+    }
+
+    protected String getExecutor(ShardRequest request) {
+        return shardExecutor;
+    }
+
 }

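The TransportBroadcastAction hunk moves the shard-level handler onto the network (SAME) thread pool and forks the real work through asyncShardOperation to the executor chosen by getExecutor, replying through a listener. Below is a minimal, self-contained sketch of that fork-and-reply pattern; the Listener interface, shardOperation body and executor names are simplified stand-ins, not the Elasticsearch types.

--------------------------------------------------
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// The transport handler stays on the calling (network) thread and hands the real
// work to the executor returned by getExecutor(), reporting the outcome via a listener.
class AsyncShardOperationSketch {
    interface Listener<T> {
        void onResponse(T response);
        void onFailure(Exception e);
    }

    private final ExecutorService shardExecutor = Executors.newFixedThreadPool(2);

    // Equivalent of shardOperation(request, task): the potentially expensive work.
    private String shardOperation(String request) {
        return "processed " + request;
    }

    // Subclasses could override this to pick a different executor per request.
    protected ExecutorService getExecutor(String request) {
        return shardExecutor;
    }

    // Equivalent of asyncShardOperation: never blocks the calling thread.
    void asyncShardOperation(String request, Listener<String> listener) {
        getExecutor(request).execute(() -> {
            try {
                listener.onResponse(shardOperation(request));
            } catch (Exception e) {
                listener.onFailure(e);
            }
        });
    }

    public static void main(String[] args) throws InterruptedException {
        AsyncShardOperationSketch action = new AsyncShardOperationSketch();
        action.asyncShardOperation("shard-0 stats", new Listener<String>() {
            @Override public void onResponse(String response) { System.out.println(response); }
            @Override public void onFailure(Exception e) { e.printStackTrace(); }
        });
        action.shardExecutor.shutdown();
        action.shardExecutor.awaitTermination(5, TimeUnit.SECONDS);
    }
}
--------------------------------------------------
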
@@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.NoShardAvailableActionException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
 import org.elasticsearch.action.support.TransportAction;
 import org.elasticsearch.action.support.TransportActions;
 import org.elasticsearch.cluster.ClusterState;
@@ -49,7 +50,6 @@ import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
-import java.io.UncheckedIOException;
 import java.util.function.Supplier;

 import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException;
@@ -66,8 +66,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
     protected final TransportService transportService;
     protected final IndexNameExpressionResolver indexNameExpressionResolver;

-    final String transportShardAction;
-    final String executor;
+    private final String transportShardAction;
+    private final String executor;

     protected TransportSingleShardAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
                                          TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
@@ -104,7 +104,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
     protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException;

     protected void asyncShardOperation(Request request, ShardId shardId, ActionListener<Response> listener) throws IOException {
-        threadPool.executor(this.executor).execute(new AbstractRunnable() {
+        threadPool.executor(getExecutor(request, shardId)).execute(new AbstractRunnable() {
             @Override
             public void onFailure(Exception e) {
                 listener.onFailure(e);
@@ -274,25 +274,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
         @Override
         public void messageReceived(Request request, final TransportChannel channel, Task task) throws Exception {
             // if we have a local operation, execute it on a thread since we don't spawn
-            execute(request, new ActionListener<Response>() {
-                @Override
-                public void onResponse(Response result) {
-                    try {
-                        channel.sendResponse(result);
-                    } catch (Exception e) {
-                        onFailure(e);
-                    }
-                }
-
-                @Override
-                public void onFailure(Exception e) {
-                    try {
-                        channel.sendResponse(e);
-                    } catch (Exception e1) {
-                        logger.warn("failed to send response for get", e1);
-                    }
-                }
-            });
+            execute(request, new HandledTransportAction.ChannelActionListener<>(channel, actionName, request));
         }
     }

@@ -303,25 +285,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
             if (logger.isTraceEnabled()) {
                 logger.trace("executing [{}] on shard [{}]", request, request.internalShardId);
             }
-            asyncShardOperation(request, request.internalShardId, new ActionListener<Response>() {
-                @Override
-                public void onResponse(Response response) {
-                    try {
-                        channel.sendResponse(response);
-                    } catch (IOException e) {
-                        onFailure(e);
-                    }
-                }
-
-                @Override
-                public void onFailure(Exception e) {
-                    try {
-                        channel.sendResponse(e);
-                    } catch (IOException e1) {
-                        throw new UncheckedIOException(e1);
-                    }
-                }
-            });
+            asyncShardOperation(request, request.internalShardId, new HandledTransportAction.ChannelActionListener<>(channel,
+                transportShardAction, request));
         }
     }
     /**
@@ -344,4 +309,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
             return concreteIndex;
         }
     }
+
+    protected String getExecutor(Request request, ShardId shardId) {
+        return executor;
+    }
 }

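The TransportSingleShardAction hunks add the same kind of hook, getExecutor(request, shardId), so a concrete action can choose the thread pool per request instead of always using the executor it was constructed with. The sketch below is a hypothetical illustration of how a subclass might use such a hook; every name in it is invented for the example and none of it comes from the diff.

--------------------------------------------------
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical use of a per-request executor hook: a subclass steers selected
// requests onto a dedicated pool without changing the generic dispatch logic.
class SingleShardExecutorChoice {
    protected final ExecutorService defaultPool = Executors.newFixedThreadPool(4);
    protected final ExecutorService adminPool = Executors.newSingleThreadExecutor();

    protected ExecutorService getExecutor(String request, int shardId) {
        return defaultPool; // default: the executor the action was configured with
    }

    void dispatch(String request, int shardId, Runnable operation) {
        getExecutor(request, shardId).execute(operation);
    }

    void shutdown() throws InterruptedException {
        defaultPool.shutdown();
        adminPool.shutdown();
        defaultPool.awaitTermination(5, TimeUnit.SECONDS);
        adminPool.awaitTermination(5, TimeUnit.SECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        SingleShardExecutorChoice action = new SingleShardExecutorChoice() {
            @Override
            protected ExecutorService getExecutor(String request, int shardId) {
                // keep administrative lookups off the busy default pool
                return request.startsWith("admin:") ? adminPool : super.getExecutor(request, shardId);
            }
        };
        action.dispatch("admin:settings", 0, () -> System.out.println("admin work on " + Thread.currentThread().getName()));
        action.dispatch("doc lookup", 1, () -> System.out.println("default work on " + Thread.currentThread().getName()));
        action.shutdown();
    }
}
--------------------------------------------------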