Merge branch 'master' into index-lifecycle

commit 73cdfaf07b
@@ -209,10 +209,6 @@ The distribution for each project will be created under the @build/distributions
 See the "TESTING":TESTING.asciidoc file for more information about running the Elasticsearch test suite.
 
-h3. Upgrading from Elasticsearch 1.x?
+h3. Upgrading from older Elasticsearch versions
 
-In order to ensure a smooth upgrade process from earlier versions of
-Elasticsearch (1.x), it is required to perform a full cluster restart. Please
-see the "setup reference":
-https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html
-for more details on the upgrade process.
+In order to ensure a smooth upgrade process from earlier versions of Elasticsearch, please see our "upgrade documentation":https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process.
@@ -1,7 +1,7 @@
 appender.console.type = Console
 appender.console.name = console
 appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %m%n
 
 # Do not log at all if it is not really critical - we're in a benchmark
 rootLogger.level = error
@@ -1,7 +1,7 @@
 appender.console.type = Console
 appender.console.name = console
 appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %m%n
 
 rootLogger.level = info
 rootLogger.appenderRef.console.ref = console
@@ -110,6 +110,7 @@ import org.elasticsearch.protocol.xpack.XPackInfoRequest;
 import org.elasticsearch.protocol.xpack.XPackUsageRequest;
 import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest;
 import org.elasticsearch.protocol.xpack.license.PutLicenseRequest;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
 import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.script.mustache.MultiSearchTemplateRequest;
@@ -137,7 +138,7 @@ final class RequestConverters {
     static Request cancelTasks(CancelTasksRequest cancelTasksRequest) {
         Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel");
         Params params = new Params(request);
-        params.withTimeout(cancelTasksRequest.getTimeout())
+        params.withTimeout(cancelTasksRequest.getTimeout())
             .withTaskId(cancelTasksRequest.getTaskId())
             .withNodes(cancelTasksRequest.getNodes())
             .withParentTaskId(cancelTasksRequest.getParentTaskId())
@@ -1134,6 +1135,18 @@ final class RequestConverters {
         return request;
     }
 
+    static Request xPackWatcherDeleteWatch(DeleteWatchRequest deleteWatchRequest) {
+        String endpoint = new EndpointBuilder()
+            .addPathPartAsIs("_xpack")
+            .addPathPartAsIs("watcher")
+            .addPathPartAsIs("watch")
+            .addPathPart(deleteWatchRequest.getId())
+            .build();
+
+        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
+        return request;
+    }
+
     static Request xpackUsage(XPackUsageRequest usageRequest) {
         Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/usage");
         Params parameters = new Params(request);
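An aside on the converter just added: `addPathPartAsIs` appends the fixed `_xpack/watcher/watch` segments verbatim, while `addPathPart` runs the caller-supplied watch id through the builder's encoding, and the request carries no body. A minimal sketch of what the converter produces (the watch id is illustrative; the expected values mirror the test added further down):

    DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest();
    deleteWatchRequest.setId("my_watch");

    Request request = RequestConverters.xPackWatcherDeleteWatch(deleteWatchRequest);
    // request.getMethod()   -> "DELETE"
    // request.getEndpoint() -> "/_xpack/watcher/watch/my_watch"
    // request.getEntity()   -> null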
@@ -19,12 +19,15 @@
 package org.elasticsearch.client;
 
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
 
 import java.io.IOException;
 
 import static java.util.Collections.emptySet;
+import static java.util.Collections.singleton;
 
 public final class WatcherClient {
 
@@ -61,4 +64,31 @@ public final class WatcherClient {
         restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackWatcherPutWatch, options,
             PutWatchResponse::fromXContent, listener, emptySet());
     }
+
+    /**
+     * Deletes a watch from the cluster
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html">
+     * the docs</a> for more.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public DeleteWatchResponse deleteWatch(DeleteWatchRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xPackWatcherDeleteWatch, options,
+            DeleteWatchResponse::fromXContent, singleton(404));
+    }
+
+    /**
+     * Asynchronously deletes a watch from the cluster
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-delete-watch.html">
+     * the docs</a> for more.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void deleteWatchAsync(DeleteWatchRequest request, RequestOptions options, ActionListener<DeleteWatchResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackWatcherDeleteWatch, options,
+            DeleteWatchResponse::fromXContent, listener, singleton(404));
+    }
 }
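The pair of methods above gives WatcherClient a blocking and a non-blocking variant of the same call, and `singleton(404)` is what turns a missing watch into a parsed `DeleteWatchResponse` instead of a thrown exception. A minimal usage sketch for the blocking form, assuming an already-configured `RestHighLevelClient` named `client` and an illustrative watch id:

    DeleteWatchRequest request = new DeleteWatchRequest("my_watch_id");
    DeleteWatchResponse response = client.xpack().watcher().deleteWatch(request, RequestOptions.DEFAULT);
    if (response.isFound() == false) {
        // the watch did not exist; the 404 was parsed into a response rather than thrown
    }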
@@ -127,6 +127,7 @@ import org.elasticsearch.index.rankeval.RatedRequest;
 import org.elasticsearch.index.rankeval.RestRankEvalAction;
 import org.elasticsearch.protocol.xpack.XPackInfoRequest;
 import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
 import org.elasticsearch.repositories.fs.FsRepository;
 import org.elasticsearch.rest.action.search.RestSearchAction;
@@ -2600,6 +2601,17 @@ public class RequestConvertersTests extends ESTestCase {
         assertThat(request.getParameters(), equalTo(expectedParams));
     }
 
+    public void testXPackDeleteWatch() {
+        DeleteWatchRequest deleteWatchRequest = new DeleteWatchRequest();
+        String watchId = randomAlphaOfLength(10);
+        deleteWatchRequest.setId(watchId);
+
+        Request request = RequestConverters.xPackWatcherDeleteWatch(deleteWatchRequest);
+        assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
+        assertEquals("/_xpack/watcher/watch/" + watchId, request.getEndpoint());
+        assertThat(request.getEntity(), nullValue());
+    }
+
     /**
      * Randomize the {@link FetchSourceContext} request parameters.
      */
@@ -21,6 +21,8 @@ package org.elasticsearch.client;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
 
@@ -30,6 +32,13 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
 
     public void testPutWatch() throws Exception {
         String watchId = randomAlphaOfLength(10);
+        PutWatchResponse putWatchResponse = createWatch(watchId);
+        assertThat(putWatchResponse.isCreated(), is(true));
+        assertThat(putWatchResponse.getId(), is(watchId));
+        assertThat(putWatchResponse.getVersion(), is(1L));
+    }
+
+    private PutWatchResponse createWatch(String watchId) throws Exception {
         String json = "{ \n" +
             "  \"trigger\": { \"schedule\": { \"interval\": \"10h\" } },\n" +
             "  \"input\": { \"none\": {} },\n" +
@@ -37,10 +46,30 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
             "}";
         BytesReference bytesReference = new BytesArray(json);
         PutWatchRequest putWatchRequest = new PutWatchRequest(watchId, bytesReference, XContentType.JSON);
-        PutWatchResponse putWatchResponse = highLevelClient().xpack().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
-        assertThat(putWatchResponse.isCreated(), is(true));
-        assertThat(putWatchResponse.getId(), is(watchId));
-        assertThat(putWatchResponse.getVersion(), is(1L));
+        return highLevelClient().xpack().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
     }
 
+    public void testDeleteWatch() throws Exception {
+        // delete watch that exists
+        {
+            String watchId = randomAlphaOfLength(10);
+            createWatch(watchId);
+            DeleteWatchResponse deleteWatchResponse = highLevelClient().xpack().watcher().deleteWatch(new DeleteWatchRequest(watchId),
+                RequestOptions.DEFAULT);
+            assertThat(deleteWatchResponse.getId(), is(watchId));
+            assertThat(deleteWatchResponse.getVersion(), is(2L));
+            assertThat(deleteWatchResponse.isFound(), is(true));
+        }
+
+        // delete watch that does not exist
+        {
+            String watchId = randomAlphaOfLength(10);
+            DeleteWatchResponse deleteWatchResponse = highLevelClient().xpack().watcher().deleteWatch(new DeleteWatchRequest(watchId),
+                RequestOptions.DEFAULT);
+            assertThat(deleteWatchResponse.getId(), is(watchId));
+            assertThat(deleteWatchResponse.getVersion(), is(1L));
+            assertThat(deleteWatchResponse.isFound(), is(false));
+        }
+    }
 }
@@ -26,6 +26,8 @@ import org.elasticsearch.client.RestHighLevelClient;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
+import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest;
 import org.elasticsearch.protocol.xpack.watcher.PutWatchResponse;
 
@@ -34,7 +36,7 @@ import java.util.concurrent.TimeUnit;
 
 public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
 
-    public void testPutWatch() throws Exception {
+    public void testWatcher() throws Exception {
         RestHighLevelClient client = highLevelClient();
 
         {
@@ -88,5 +90,46 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
 
             assertTrue(latch.await(30L, TimeUnit.SECONDS));
         }
+
+        {
+            //tag::x-pack-delete-watch-execute
+            DeleteWatchRequest request = new DeleteWatchRequest("my_watch_id");
+            DeleteWatchResponse response = client.xpack().watcher().deleteWatch(request, RequestOptions.DEFAULT);
+            //end::x-pack-delete-watch-execute
+
+            //tag::x-pack-delete-watch-response
+            String watchId = response.getId(); // <1>
+            boolean found = response.isFound(); // <2>
+            long version = response.getVersion(); // <3>
+            //end::x-pack-delete-watch-response
+        }
+
+        {
+            DeleteWatchRequest request = new DeleteWatchRequest("my_other_watch_id");
+            // tag::x-pack-delete-watch-execute-listener
+            ActionListener<DeleteWatchResponse> listener = new ActionListener<DeleteWatchResponse>() {
+                @Override
+                public void onResponse(DeleteWatchResponse response) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            // end::x-pack-delete-watch-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::x-pack-delete-watch-execute-async
+            client.xpack().watcher().deleteWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::x-pack-delete-watch-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
 }
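The latch dance in the async block above is the standard pattern these documentation tests use to block on an asynchronous call; condensed into one place (a sketch only, assuming the same client and imports as the test above):

    final CountDownLatch latch = new CountDownLatch(1);
    ActionListener<DeleteWatchResponse> listener = new LatchedActionListener<>(
        ActionListener.wrap(
            response -> { /* assertions on the response go here */ },
            e -> { /* failure handling goes here */ }),
        latch); // counts down after delegating, on success or failure alike

    client.xpack().watcher().deleteWatchAsync(new DeleteWatchRequest("my_watch_id"), RequestOptions.DEFAULT, listener);
    assertTrue(latch.await(30L, TimeUnit.SECONDS)); // fails the test if the listener is never invoked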
@@ -7,13 +7,13 @@ logger.action.level = debug
 appender.console.type = Console
 appender.console.name = console
 appender.console.layout.type = PatternLayout
-appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %m%n
 
 appender.rolling.type = RollingFile
 appender.rolling.name = rolling
 appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
 appender.rolling.layout.type = PatternLayout
-appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
 appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
 appender.rolling.policies.type = Policies
 appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
@@ -38,7 +38,7 @@ appender.deprecation_rolling.type = RollingFile
 appender.deprecation_rolling.name = deprecation_rolling
 appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
 appender.deprecation_rolling.layout.type = PatternLayout
-appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
 appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
 appender.deprecation_rolling.policies.type = Policies
 appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
@@ -55,7 +55,7 @@ appender.index_search_slowlog_rolling.type = RollingFile
 appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
 appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
 appender.index_search_slowlog_rolling.layout.type = PatternLayout
-appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
 appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz
 appender.index_search_slowlog_rolling.policies.type = Policies
 appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
@@ -72,7 +72,7 @@ appender.index_indexing_slowlog_rolling.type = RollingFile
 appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
 appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
 appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
-appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
 appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz
 appender.index_indexing_slowlog_rolling.policies.type = Policies
 appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
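All four appenders get the same treatment: `[%node_name]` is spliced in after the logger column, and `%marker%m` becomes `%marker %m`. Roughly, a console line that used to render as

    [2018-07-30T10:15:30,123][INFO ][o.e.n.Node               ] starting ...

now renders with the emitting node's name inline (timestamp, logger, and node name are illustrative):

    [2018-07-30T10:15:30,123][INFO ][o.e.n.Node               ] [node-1] starting ...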
@@ -54,11 +54,14 @@ The Java High Level REST Client supports the following Miscellaneous APIs:
 * <<java-rest-high-main>>
 * <<java-rest-high-ping>>
 * <<java-rest-high-x-pack-info>>
 * <<java-rest-high-x-pack-watcher-put-watch>>
+* <<java-rest-high-x-pack-watcher-delete-watch>>
 
 include::miscellaneous/main.asciidoc[]
 include::miscellaneous/ping.asciidoc[]
 include::x-pack/x-pack-info.asciidoc[]
 include::x-pack/watcher/put-watch.asciidoc[]
+include::x-pack/watcher/delete-watch.asciidoc[]
 
 == Indices APIs
@@ -0,0 +1,53 @@
[[java-rest-high-x-pack-watcher-delete-watch]]
=== X-Pack Delete Watch API

[[java-rest-high-x-pack-watcher-delete-watch-execution]]
==== Execution

A watch can be deleted as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-delete-watch-execute]
--------------------------------------------------

[[java-rest-high-x-pack-watcher-delete-watch-response]]
==== Response

The returned `DeleteWatchResponse` contains `found`, `id`,
and `version` information.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-delete-watch-response]
--------------------------------------------------
<1> `_id` contains the id of the watch
<2> `found` is a boolean indicating whether the watch was found
<3> `_version` returns the version of the deleted watch

[[java-rest-high-x-pack-watcher-delete-watch-async]]
==== Asynchronous Execution

This request can be executed asynchronously:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-delete-watch-execute-async]
--------------------------------------------------
<1> The `DeleteWatchRequest` to execute and the `ActionListener` to use when
the execution completes

The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.

A typical listener for `DeleteWatchResponse` looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-delete-watch-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument
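For reference, a sketch of the underlying REST response that `DeleteWatchResponse` is parsed from, in the same style as the surrounding docs (field values illustrative):

[source,js]
--------------------------------------------------
{
  "_id": "my_watch_id",
  "_version": 2,
  "found": true
}
--------------------------------------------------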
@@ -1,5 +1,5 @@
 [[java-rest-high-x-pack-watcher-put-watch]]
-=== X-Pack Info API
+=== X-Pack Put Watch API
 
 [[java-rest-high-x-pack-watcher-put-watch-execution]]
 ==== Execution
@@ -16,7 +16,7 @@ include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-put-watch-execute
 [[java-rest-high-x-pack-watcher-put-watch-response]]
 ==== Response
 
-The returned `XPackPutWatchResponse` contain `created`, `id`,
+The returned `PutWatchResponse` contains `created`, `id`,
 and `version` information.
 
 ["source","java",subs="attributes,callouts,macros"]
@@ -36,7 +36,7 @@ This request can be executed asynchronously:
 --------------------------------------------------
 include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-put-watch-execute-async]
 --------------------------------------------------
-<1> The `XPackPutWatchRequest` to execute and the `ActionListener` to use when
+<1> The `PutWatchRequest` to execute and the `ActionListener` to use when
 the execution completes
 
 The asynchronous method does not block and returns immediately. Once it is
@@ -44,7 +44,7 @@ completed the `ActionListener` is called back using the `onResponse` method
 if the execution successfully completed or using the `onFailure` method if
 it failed.
 
-A typical listener for `XPackPutWatchResponse` looks like:
+A typical listener for `PutWatchResponse` looks like:
 
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
@@ -653,7 +653,7 @@ GET /_search
 // CONSOLE
 
 In the above example, buckets will be created for all the tags that has the word `sport` in them, except those starting
-with `water_` (so the tag `water_sports` will no be aggregated). The `include` regular expression will determine what
+with `water_` (so the tag `water_sports` will not be aggregated). The `include` regular expression will determine what
 values are "allowed" to be aggregated, while the `exclude` determines the values that should not be aggregated. When
 both are defined, the `exclude` has precedence, meaning, the `include` is evaluated first and only then the `exclude`.
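A sketch of the kind of request that paragraph describes, in the docs' own style (field name and patterns illustrative):

[source,js]
--------------------------------------------------
GET /_search
{
  "aggs": {
    "tags": {
      "terms": {
        "field": "tags",
        "include": ".*sport.*",
        "exclude": "water_.*"
      }
    }
  }
}
--------------------------------------------------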
@@ -50,7 +50,7 @@ appender.index_search_slowlog_rolling.type = RollingFile
 appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
 appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
 appender.index_search_slowlog_rolling.layout.type = PatternLayout
-appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
 appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%i.log.gz
 appender.index_search_slowlog_rolling.policies.type = Policies
 appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
@@ -103,7 +103,7 @@ appender.index_indexing_slowlog_rolling.type = RollingFile
 appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
 appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
 appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
-appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n
 appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz
 appender.index_indexing_slowlog_rolling.policies.type = Policies
 appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
@@ -257,6 +257,9 @@ and there are multiple indices referenced by an alias, then writes will not be a
 It is possible to specify an index associated with an alias as a write index using both the aliases API
 and index creation API.
 
+Setting an index to be the write index with an alias also affects how the alias is manipulated during
+Rollover (see <<indices-rollover-index, Rollover With Write Index>>).
+
 [source,js]
 --------------------------------------------------
 POST /_aliases
@@ -4,10 +4,19 @@
 The rollover index API rolls an alias over to a new index when the existing
 index is considered to be too large or too old.
 
-The API accepts a single alias name and a list of `conditions`. The alias
-must point to a single index only. If the index satisfies the specified
-conditions then a new index is created and the alias is switched to point to
-the new index.
+The API accepts a single alias name and a list of `conditions`. The alias must point to a write index for
+a Rollover request to be valid. There are two ways this can be achieved, and depending on the configuration, the
+alias metadata will be updated differently. The two scenarios are as follows:
+
+- The alias only points to a single index with `is_write_index` not configured (defaults to `null`).
+
+In this scenario, the rollover alias will be added to the newly created index and removed
+from the original (rolled-over) index.
+
+- The alias points to one or more indices with `is_write_index` set to `true` on the index to be rolled over (the write index).
+
+In this scenario, the write index will have its rollover alias' `is_write_index` set to `false`, while the newly created index
+will now have the rollover alias pointing to it as the write index with `is_write_index` as `true`.
+
 
 [source,js]
@@ -231,3 +240,98 @@ POST /logs_write/_rollover?dry_run
 Because the rollover operation creates a new index to rollover to, the
 <<create-index-wait-for-active-shards,`wait_for_active_shards`>> setting on
 index creation applies to the rollover action as well.
+
+[[indices-rollover-is-write-index]]
+[float]
+=== Write Index Alias Behavior
+
+The rollover alias when rolling over a write index that has `is_write_index` explicitly set to `true` is not
+swapped during rollover actions. Since having an alias point to multiple indices is ambiguous in distinguishing
+which is the correct write index to roll over, it is not valid to rollover an alias that points to multiple indices.
+For this reason, the default behavior is to swap which index is being pointed to by the write-oriented alias. This
+was `logs_write` in some of the above examples. Since setting `is_write_index` enables an alias to point to multiple indices
+while also being explicit as to which is the write index that rollover should target, removing the alias from the rolled over
+index is not necessary. This simplifies things by allowing for one alias to behave both as the write and read aliases for
+indices that are being managed with Rollover.
+
+Look at the behavior of the aliases in the following example where `is_write_index` is set on the rolled over index.
+
+[source,js]
+--------------------------------------------------
+PUT my_logs_index-000001
+{
+  "aliases": {
+    "logs": { "is_write_index": true } <1>
+  }
+}
+
+PUT logs/_doc/1
+{
+  "message": "a dummy log"
+}
+
+POST logs/_refresh
+
+POST /logs/_rollover
+{
+  "conditions": {
+    "max_docs": "1"
+  }
+}
+
+PUT logs/_doc/2 <2>
+{
+  "message": "a newer log"
+}
+--------------------------------------------------
+// CONSOLE
+<1> configures `my_logs_index` as the write index for the `logs` alias
+<2> newly indexed documents against the `logs` alias will write to the new index
+
+[source,js]
+--------------------------------------------------
+{
+  "_index" : "my_logs_index-000002",
+  "_type" : "_doc",
+  "_id" : "2",
+  "_version" : 1,
+  "result" : "created",
+  "_shards" : {
+    "total" : 2,
+    "successful" : 1,
+    "failed" : 0
+  },
+  "_seq_no" : 0,
+  "_primary_term" : 1
+}
+--------------------------------------------------
+// TESTRESPONSE
+
+//////////////////////////
+[source,js]
+--------------------------------------------------
+GET _alias
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+//////////////////////////
+
+After the rollover, the alias metadata for the two indices will have the `is_write_index` setting
+reflect each index's role, with the newly created index as the write index.
+
+[source,js]
+--------------------------------------------------
+{
+  "my_logs_index-000002": {
+    "aliases": {
+      "logs": { "is_write_index": true }
+    }
+  },
+  "my_logs_index-000001": {
+    "aliases": {
+      "logs": { "is_write_index" : false }
+    }
+  }
+}
+--------------------------------------------------
+// TESTRESPONSE
@@ -5,10 +5,10 @@ The fuzzy query uses similarity based on Levenshtein edit distance.
 
 ==== String fields
 
-The `fuzzy` query generates all possible matching terms that are within the
+The `fuzzy` query generates matching terms that are within the
 maximum edit distance specified in `fuzziness` and then checks the term
 dictionary to find out which of those generated terms actually exist in the
-index.
+index. The final query uses up to `max_expansions` matching terms.
 
 Here is a simple example:
 
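The example the hunk refers to is elided here; a sketch of a fuzzy query exercising both parameters the new text mentions (field and values illustrative):

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "fuzzy": {
      "user": {
        "value": "ki",
        "fuzziness": 2,
        "max_expansions": 50
      }
    }
  }
}
--------------------------------------------------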
@@ -25,7 +25,7 @@ appender.rolling.type = RollingFile <1>
 appender.rolling.name = rolling
 appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log <2>
 appender.rolling.layout.type = PatternLayout
-appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] [%node_name]%marker %.-10000m%n
 appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz <3>
 appender.rolling.policies.type = Policies
 appender.rolling.policies.time.type = TimeBasedTriggeringPolicy <4>
@@ -68,8 +68,7 @@ public final class GrokProcessor extends AbstractProcessor {
             throw new IllegalArgumentException("Provided Grok expressions do not match field value: [" + fieldValue + "]");
         }
 
-        matches.entrySet().stream()
-            .forEach((e) -> ingestDocument.setFieldValue(e.getKey(), e.getValue()));
+        matches.forEach(ingestDocument::setFieldValue);
 
         if (traceMatch) {
             if (matchPatterns.size() > 1) {
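The simplification works because `Map.forEach` takes a `BiConsumer<K,V>`, which lines up directly with the two-argument `setFieldValue(String, Object)`. A self-contained sketch of the equivalence:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.BiConsumer;

    public class ForEachSketch {
        public static void main(String[] args) {
            Map<String, Object> matches = new HashMap<>();
            matches.put("field", "value");

            BiConsumer<String, Object> setFieldValue = (k, v) -> System.out.println(k + " = " + v);

            // old style: stream over the entry set and unpack each entry by hand
            matches.entrySet().stream().forEach(e -> setFieldValue.accept(e.getKey(), e.getValue()));

            // new style: Map.forEach hands the consumer key and value directly
            matches.forEach(setFieldValue);
        }
    }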
@@ -67,10 +67,9 @@ public class ConvertProcessorTests extends ESTestCase {
         assertThat(ingestDocument.getFieldValue(fieldName, Integer.class), equalTo(10));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32370")
     public void testConvertIntHexError() {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
-        String value = "0x" + randomAlphaOfLengthBetween(1, 10);
+        String value = "0xnotanumber";
         String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, value);
         Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.INTEGER, false);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument));
@@ -138,10 +137,9 @@ public class ConvertProcessorTests extends ESTestCase {
         assertThat(ingestDocument.getFieldValue(fieldName, Long.class), equalTo(10L));
     }
 
-    @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/32370")
     public void testConvertLongHexError() {
         IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
-        String value = "0x" + randomAlphaOfLengthBetween(1, 10);
+        String value = "0xnotanumber";
         String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, value);
         Processor processor = new ConvertProcessor(randomAlphaOfLength(10), fieldName, fieldName, Type.LONG, false);
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument));
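The flakiness these two hunks fix: a random alphabetic suffix can happen to consist only of the letters a-f, in which case "0x" plus the suffix is a perfectly valid hexadecimal literal and the conversion succeeds instead of throwing. A sketch of the failure mode, using `Integer.decode` as a stand-in for the processor's hex-aware integer parsing:

    public class HexFlakinessSketch {
        public static void main(String[] args) {
            // "0xface" parses: every character is a hex digit
            System.out.println(Integer.decode("0xface")); // 64206

            // "0xnotanumber" can never parse, so the error path is deterministic
            try {
                Integer.decode("0xnotanumber");
            } catch (NumberFormatException e) {
                System.out.println("always throws: " + e.getMessage());
            }
        }
    }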
@@ -69,17 +69,14 @@ final class Compiler {
     /**
     * A secure class loader used to define Painless scripts.
     */
-    static final class Loader extends SecureClassLoader {
+    final class Loader extends SecureClassLoader {
         private final AtomicInteger lambdaCounter = new AtomicInteger(0);
-        private final PainlessLookup painlessLookup;
 
         /**
         * @param parent The parent ClassLoader.
         */
-        Loader(ClassLoader parent, PainlessLookup painlessLookup) {
+        Loader(ClassLoader parent) {
             super(parent);
-
-            this.painlessLookup = painlessLookup;
         }
 
         /**
@@ -90,6 +87,15 @@ final class Compiler {
         */
         @Override
         public Class<?> findClass(String name) throws ClassNotFoundException {
+            if (scriptClass.getName().equals(name)) {
+                return scriptClass;
+            }
+            if (factoryClass != null && factoryClass.getName().equals(name)) {
+                return factoryClass;
+            }
+            if (statefulFactoryClass != null && statefulFactoryClass.getName().equals(name)) {
+                return statefulFactoryClass;
+            }
             Class<?> found = painlessLookup.getClassFromBinaryName(name);
 
             return found != null ? found : super.findClass(name);
@@ -139,13 +145,23 @@ final class Compiler {
     * {@link Compiler}'s specified {@link PainlessLookup}.
     */
     public Loader createLoader(ClassLoader parent) {
-        return new Loader(parent, painlessLookup);
+        return new Loader(parent);
     }
 
     /**
-     * The class/interface the script is guaranteed to derive/implement.
+     * The class/interface the script will implement.
     */
-    private final Class<?> base;
+    private final Class<?> scriptClass;
+
+    /**
+     * The class/interface to create the {@code scriptClass} instance.
+     */
+    private final Class<?> factoryClass;
+
+    /**
+     * An optional class/interface to create the {@code factoryClass} instance.
+     */
+    private final Class<?> statefulFactoryClass;
 
     /**
     * The whitelist the script will use.
@@ -154,11 +170,15 @@ final class Compiler {
 
     /**
     * Standard constructor.
-     * @param base The class/interface the script is guaranteed to derive/implement.
+     * @param scriptClass The class/interface the script will implement.
+     * @param factoryClass An optional class/interface to create the {@code scriptClass} instance.
+     * @param statefulFactoryClass An optional class/interface to create the {@code factoryClass} instance.
     * @param painlessLookup The whitelist the script will use.
     */
-    Compiler(Class<?> base, PainlessLookup painlessLookup) {
-        this.base = base;
+    Compiler(Class<?> scriptClass, Class<?> factoryClass, Class<?> statefulFactoryClass, PainlessLookup painlessLookup) {
+        this.scriptClass = scriptClass;
+        this.factoryClass = factoryClass;
+        this.statefulFactoryClass = statefulFactoryClass;
         this.painlessLookup = painlessLookup;
     }
 
@@ -177,7 +197,7 @@ final class Compiler {
             " plugin if a script longer than this length is a requirement.");
         }
 
-        ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, base);
+        ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass);
         SSource root = Walker.buildPainlessTree(scriptClassInfo, reserved, name, source, settings, painlessLookup,
             null);
         root.analyze(painlessLookup);
@@ -209,7 +229,7 @@ final class Compiler {
             " plugin if a script longer than this length is a requirement.");
         }
 
-        ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, base);
+        ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass);
         SSource root = Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), name, source, settings, painlessLookup,
             debugStream);
         root.analyze(painlessLookup);
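Making `Loader` a non-static inner class is what lets it drop its own `painlessLookup` field and constructor parameter: an inner-class instance implicitly holds a reference to its enclosing `Compiler`, so `findClass` can read the outer `painlessLookup` and the new `scriptClass`/`factoryClass`/`statefulFactoryClass` fields directly. A generic sketch of that mechanism (names illustrative, unrelated to Painless):

    public class Outer {
        private final String shared = "owned by Outer";

        // non-static: every Inner captures a reference to its enclosing Outer
        final class Inner {
            String read() {
                return shared; // resolves to Outer.this.shared, no duplicated field needed
            }
        }

        public static void main(String[] args) {
            Outer outer = new Outer();
            Inner inner = outer.new Inner();
            System.out.println(inner.read());
        }
    }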
@@ -368,7 +368,7 @@ public final class Def {
             ref = new FunctionRef(clazz, interfaceMethod, call, handle.type(), captures.length);
         } else {
             // whitelist lookup
-            ref = new FunctionRef(painlessLookup, clazz, type, call, captures.length);
+            ref = FunctionRef.resolveFromLookup(painlessLookup, clazz, type, call, captures.length);
         }
         final CallSite callSite = LambdaBootstrap.lambdaBootstrap(
             methodHandlesLookup,
@@ -20,13 +20,16 @@
 package org.elasticsearch.painless;
 
 import org.elasticsearch.painless.lookup.PainlessClass;
+import org.elasticsearch.painless.lookup.PainlessConstructor;
 import org.elasticsearch.painless.lookup.PainlessLookup;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.objectweb.asm.Type;
 
 import java.lang.invoke.MethodType;
+import java.lang.reflect.Constructor;
 import java.lang.reflect.Modifier;
+import java.util.List;
 
 import static org.elasticsearch.painless.WriterConstants.CLASS_NAME;
 import static org.objectweb.asm.Opcodes.H_INVOKEINTERFACE;
@@ -59,8 +62,10 @@ public class FunctionRef {
 
     /** interface method */
     public final PainlessMethod interfaceMethod;
-    /** delegate method */
-    public final PainlessMethod delegateMethod;
+    /** delegate method type parameters */
+    public final List<Class<?>> delegateTypeParameters;
+    /** delegate method return type */
+    public final Class<?> delegateReturnType;
 
     /** factory method type descriptor */
     public final String factoryDescriptor;
@@ -80,9 +85,47 @@ public class FunctionRef {
     * @param call the right hand side of a method reference expression
     * @param numCaptures number of captured arguments
     */
-    public FunctionRef(PainlessLookup painlessLookup, Class<?> expected, String type, String call, int numCaptures) {
-        this(expected, painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod,
-            lookup(painlessLookup, expected, type, call, numCaptures > 0), numCaptures);
+    public static FunctionRef resolveFromLookup(
+            PainlessLookup painlessLookup, Class<?> expected, String type, String call, int numCaptures) {
+
+        if ("new".equals(call)) {
+            return new FunctionRef(expected, painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod,
+                lookup(painlessLookup, expected, type), numCaptures);
+        } else {
+            return new FunctionRef(expected, painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod,
+                lookup(painlessLookup, expected, type, call, numCaptures > 0), numCaptures);
+        }
     }
 
+    /**
+     * Creates a new FunctionRef (already resolved)
+     * @param expected functional interface type to implement
+     * @param interfaceMethod functional interface method
+     * @param delegateConstructor implementation constructor
+     * @param numCaptures number of captured arguments
+     */
+    public FunctionRef(Class<?> expected, PainlessMethod interfaceMethod, PainlessConstructor delegateConstructor, int numCaptures) {
+        Constructor<?> javaConstructor = delegateConstructor.javaConstructor;
+        MethodType delegateMethodType = delegateConstructor.methodType;
+
+        interfaceMethodName = interfaceMethod.name;
+        factoryMethodType = MethodType.methodType(expected,
+            delegateMethodType.dropParameterTypes(numCaptures, delegateMethodType.parameterCount()));
+        interfaceMethodType = interfaceMethod.methodType.dropParameterTypes(0, 1);
+
+        delegateClassName = javaConstructor.getDeclaringClass().getName();
+        isDelegateInterface = false;
+        delegateInvokeType = H_NEWINVOKESPECIAL;
+        delegateMethodName = PainlessLookupUtility.CONSTRUCTOR_NAME;
+        this.delegateMethodType = delegateMethodType.dropParameterTypes(0, numCaptures);
+
+        this.interfaceMethod = interfaceMethod;
+        delegateTypeParameters = delegateConstructor.typeParameters;
+        delegateReturnType = void.class;
+
+        factoryDescriptor = factoryMethodType.toMethodDescriptorString();
+        interfaceType = Type.getMethodType(interfaceMethodType.toMethodDescriptorString());
+        delegateType = Type.getMethodType(this.delegateMethodType.toMethodDescriptorString());
+    }
+
     /**
@@ -112,9 +155,7 @@ public class FunctionRef {
             isDelegateInterface = delegateMethod.target.isInterface();
         }
 
-        if ("<init>".equals(delegateMethod.name)) {
-            delegateInvokeType = H_NEWINVOKESPECIAL;
-        } else if (Modifier.isStatic(delegateMethod.modifiers)) {
+        if (Modifier.isStatic(delegateMethod.modifiers)) {
             delegateInvokeType = H_INVOKESTATIC;
         } else if (delegateMethod.target.isInterface()) {
             delegateInvokeType = H_INVOKEINTERFACE;
@@ -126,7 +167,8 @@ public class FunctionRef {
         this.delegateMethodType = delegateMethodType.dropParameterTypes(0, numCaptures);
 
         this.interfaceMethod = interfaceMethod;
-        this.delegateMethod = delegateMethod;
+        delegateTypeParameters = delegateMethod.arguments;
+        delegateReturnType = delegateMethod.rtn;
 
         factoryDescriptor = factoryMethodType.toMethodDescriptorString();
         interfaceType = Type.getMethodType(interfaceMethodType.toMethodDescriptorString());
@@ -151,13 +193,37 @@ public class FunctionRef {
         isDelegateInterface = false;
 
         this.interfaceMethod = null;
-        delegateMethod = null;
+        delegateTypeParameters = null;
+        delegateReturnType = null;
 
         factoryDescriptor = null;
         interfaceType = null;
         delegateType = null;
     }
 
+    /**
+     * Looks up {@code type} from the whitelist, and returns a matching constructor.
+     */
+    private static PainlessConstructor lookup(PainlessLookup painlessLookup, Class<?> expected, String type) {
+        // check its really a functional interface
+        // for e.g. Comparable
+        PainlessMethod method = painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod;
+        if (method == null) {
+            throw new IllegalArgumentException("Cannot convert function reference [" + type + "::new] " +
+                "to [" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface");
+        }
+
+        // lookup requested constructor
+        PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(painlessLookup.getJavaClassFromPainlessType(type));
+        PainlessConstructor impl = struct.constructors.get(PainlessLookupUtility.buildPainlessConstructorKey(method.arguments.size()));
+
+        if (impl == null) {
+            throw new IllegalArgumentException("Unknown reference [" + type + "::new] matching [" + expected + "]");
+        }
+
+        return impl;
+    }
+
     /**
     * Looks up {@code type::call} from the whitelist, and returns a matching method.
     */
@@ -174,27 +240,22 @@ public class FunctionRef {
         // lookup requested method
         PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(painlessLookup.getJavaClassFromPainlessType(type));
         final PainlessMethod impl;
-        // ctor ref
-        if ("new".equals(call)) {
-            impl = struct.constructors.get(PainlessLookupUtility.buildPainlessMethodKey("<init>", method.arguments.size()));
-        } else {
-            // look for a static impl first
-            PainlessMethod staticImpl =
-                struct.staticMethods.get(PainlessLookupUtility.buildPainlessMethodKey(call, method.arguments.size()));
-            if (staticImpl == null) {
-                // otherwise a virtual impl
-                final int arity;
-                if (receiverCaptured) {
-                    // receiver captured
-                    arity = method.arguments.size();
-                } else {
-                    // receiver passed
-                    arity = method.arguments.size() - 1;
-                }
-                impl = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey(call, arity));
-            } else {
-                impl = staticImpl;
-            }
-        }
+        // look for a static impl first
+        PainlessMethod staticImpl =
+            struct.staticMethods.get(PainlessLookupUtility.buildPainlessMethodKey(call, method.arguments.size()));
+        if (staticImpl == null) {
+            // otherwise a virtual impl
+            final int arity;
+            if (receiverCaptured) {
+                // receiver captured
+                arity = method.arguments.size();
+            } else {
+                // receiver passed
+                arity = method.arguments.size() - 1;
+            }
+            impl = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey(call, arity));
+        } else {
+            impl = staticImpl;
+        }
         if (impl == null) {
             throw new IllegalArgumentException("Unknown reference [" + type + "::" + call + "] matching " +
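With `::new` references split out into the constructor-specific branch of `resolveFromLookup`, the method lookup above only has to distinguish static from virtual references, and the arity adjustment is the subtle part: a captured receiver is bound when the reference is created, so the delegate is looked up with the interface method's full argument count, while a passed receiver arrives through the interface method itself, so the delegate needs one parameter fewer. The same distinction in plain Java (a sketch, independent of the Painless whitelist):

    import java.util.function.Function;
    import java.util.function.Supplier;

    public class AritySketch {
        public static void main(String[] args) {
            // receiver passed: Function.apply(String) supplies the receiver,
            // so length() contributes interface-arity minus one parameters (zero)
            Function<String, Integer> passed = String::length;

            // receiver captured: "hello" is bound up front,
            // so the zero-argument Supplier.get() reaches the same method
            String captured = "hello";
            Supplier<Integer> bound = captured::length;

            System.out.println(passed.apply("hello")); // 5
            System.out.println(bound.get());           // 5
        }
    }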
@@ -36,7 +36,6 @@ import java.security.AccessController;
 import java.security.PrivilegedAction;
 
 import static java.lang.invoke.MethodHandles.Lookup;
-import static org.elasticsearch.painless.Compiler.Loader;
 import static org.elasticsearch.painless.WriterConstants.CLASS_VERSION;
 import static org.elasticsearch.painless.WriterConstants.CTOR_METHOD_NAME;
 import static org.elasticsearch.painless.WriterConstants.DELEGATE_BOOTSTRAP_HANDLE;
@@ -207,7 +206,7 @@ public final class LambdaBootstrap {
             MethodType delegateMethodType,
             int isDelegateInterface)
             throws LambdaConversionException {
-        Loader loader = (Loader)lookup.lookupClass().getClassLoader();
+        Compiler.Loader loader = (Compiler.Loader)lookup.lookupClass().getClassLoader();
         String lambdaClassName = Type.getInternalName(lookup.lookupClass()) + "$$Lambda" + loader.newLambdaIdentifier();
         Type lambdaClassType = Type.getObjectType(lambdaClassName);
         Type delegateClassType = Type.getObjectType(delegateClassName.replace('.', '/'));
@@ -457,11 +456,11 @@ public final class LambdaBootstrap {
     }
 
     /**
-     * Defines the {@link Class} for the lambda class using the same {@link Loader}
+     * Defines the {@link Class} for the lambda class using the same {@link Compiler.Loader}
     * that originally defined the class for the Painless script.
     */
     private static Class<?> createLambdaClass(
-            Loader loader,
+            Compiler.Loader loader,
             ClassWriter cw,
             Type lambdaClassType) {
 
@@ -102,10 +102,10 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr
         for (Map.Entry<ScriptContext<?>, List<Whitelist>> entry : contexts.entrySet()) {
             ScriptContext<?> context = entry.getKey();
             if (context.instanceClazz.equals(SearchScript.class) || context.instanceClazz.equals(ExecutableScript.class)) {
-                contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class,
+                contextsToCompilers.put(context, new Compiler(GenericElasticsearchScript.class, null, null,
                     PainlessLookupBuilder.buildFromWhitelists(entry.getValue())));
             } else {
-                contextsToCompilers.put(context, new Compiler(context.instanceClazz,
+                contextsToCompilers.put(context, new Compiler(context.instanceClazz, context.factoryClazz, context.statefulFactoryClazz,
                     PainlessLookupBuilder.buildFromWhitelists(entry.getValue())));
             }
         }
@@ -24,7 +24,8 @@ import java.util.Collections;
 import java.util.Map;
 
 public final class PainlessClass {
-    public final Map<String, PainlessMethod> constructors;
+    public final Map<String, PainlessConstructor> constructors;
+
     public final Map<String, PainlessMethod> staticMethods;
     public final Map<String, PainlessMethod> methods;
 
@@ -36,13 +37,14 @@ public final class PainlessClass {
 
     public final PainlessMethod functionalMethod;
 
-    PainlessClass(Map<String, PainlessMethod> constructors,
+    PainlessClass(Map<String, PainlessConstructor> constructors,
                   Map<String, PainlessMethod> staticMethods, Map<String, PainlessMethod> methods,
                   Map<String, PainlessField> staticFields, Map<String, PainlessField> fields,
                   Map<String, MethodHandle> getterMethodHandles, Map<String, MethodHandle> setterMethodHandles,
                   PainlessMethod functionalMethod) {
         this.constructors = Collections.unmodifiableMap(constructors);
+
         this.staticMethods = Collections.unmodifiableMap(staticMethods);
         this.methods = Collections.unmodifiableMap(methods);
 
@@ -24,7 +24,8 @@ import java.util.HashMap;
 import java.util.Map;
 
 final class PainlessClassBuilder {
-    final Map<String, PainlessMethod> constructors;
+    final Map<String, PainlessConstructor> constructors;
+
     final Map<String, PainlessMethod> staticMethods;
     final Map<String, PainlessMethod> methods;
 
@@ -38,6 +39,7 @@ final class PainlessClassBuilder {
 
     PainlessClassBuilder() {
         constructors = new HashMap<>();
+
         staticMethods = new HashMap<>();
         methods = new HashMap<>();
 
@@ -0,0 +1,39 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.painless.lookup;

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodType;
import java.lang.reflect.Constructor;
import java.util.List;

public class PainlessConstructor {
    public final Constructor<?> javaConstructor;
    public final List<Class<?>> typeParameters;
    public final MethodHandle methodHandle;
    public final MethodType methodType;

    PainlessConstructor(Constructor<?> javaConstructor, List<Class<?>> typeParameters, MethodHandle methodHandle, MethodType methodType) {
        this.javaConstructor = javaConstructor;
        this.typeParameters = typeParameters;
        this.methodHandle = methodHandle;
        this.methodType = methodType;
    }
}
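A sketch of how the four fields of such an object are typically derived from plain reflection (hand-rolled here with an illustrative target class; the real wiring happens in PainlessLookupBuilder below):

    import java.lang.invoke.MethodHandle;
    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.MethodType;
    import java.lang.reflect.Constructor;
    import java.util.Arrays;
    import java.util.List;

    public class ConstructorMetadataSketch {
        public static void main(String[] args) throws Exception {
            Constructor<?> javaConstructor = StringBuilder.class.getConstructor(int.class);
            List<Class<?>> typeParameters = Arrays.asList(javaConstructor.getParameterTypes());
            MethodHandle methodHandle = MethodHandles.publicLookup().unreflectConstructor(javaConstructor);
            MethodType methodType = methodHandle.type();

            System.out.println(typeParameters); // [int]
            System.out.println(methodType);     // (int)StringBuilder
        }
    }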
@ -40,15 +40,47 @@ import java.util.Map;
|
|||
import java.util.Objects;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.CONSTRUCTOR_NAME;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.DEF_CLASS_NAME;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessConstructorKey;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessFieldKey;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessMethodKey;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.typeToCanonicalTypeName;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.typeToJavaType;
|
||||
import static org.elasticsearch.painless.lookup.PainlessLookupUtility.typesToCanonicalTypeNames;
|
||||
|
||||
public class PainlessLookupBuilder {
|
||||
public final class PainlessLookupBuilder {
|
||||
|
||||
private static class PainlessConstructorCacheKey {
|
||||
|
||||
private final Class<?> targetType;
|
||||
private final List<Class<?>> typeParameters;
|
||||
|
||||
private PainlessConstructorCacheKey(Class<?> targetType, List<Class<?>> typeParameters) {
|
||||
this.targetType = targetType;
|
||||
this.typeParameters = Collections.unmodifiableList(typeParameters);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object object) {
|
||||
if (this == object) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (object == null || getClass() != object.getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
PainlessConstructorCacheKey that = (PainlessConstructorCacheKey)object;
|
||||
|
||||
return Objects.equals(targetType, that.targetType) &&
|
||||
Objects.equals(typeParameters, that.typeParameters);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(targetType, typeParameters);
|
||||
}
|
||||
}
|
||||
|
||||
private static class PainlessMethodCacheKey {
|
||||
|
||||
|
@ -120,8 +152,9 @@ public class PainlessLookupBuilder {
|
|||
}
|
||||
}
|
||||
|
||||
private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>();
|
||||
private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>();
|
||||
private static final Map<PainlessConstructorCacheKey, PainlessConstructor> painlessConstuctorCache = new HashMap<>();
|
||||
private static final Map<PainlessMethodCacheKey, PainlessMethod> painlessMethodCache = new HashMap<>();
|
||||
private static final Map<PainlessFieldCacheKey, PainlessField> painlessFieldCache = new HashMap<>();
|
||||
|
||||
private static final Pattern CLASS_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][._a-zA-Z0-9]*$");
|
||||
private static final Pattern METHOD_NAME_PATTERN = Pattern.compile("^[_a-zA-Z][_a-zA-Z0-9]*$");
|
||||
|
@ -343,11 +376,10 @@ public class PainlessLookupBuilder {
|
|||
"[[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "] not found", nsme);
|
||||
}
|
||||
|
||||
String painlessMethodKey = buildPainlessMethodKey(CONSTRUCTOR_NAME, typeParametersSize);
|
||||
PainlessMethod painlessConstructor = painlessClassBuilder.constructors.get(painlessMethodKey);
|
||||
String painlessConstructorKey = buildPainlessConstructorKey(typeParametersSize);
|
||||
PainlessConstructor painlessConstructor = painlessClassBuilder.constructors.get(painlessConstructorKey);
|
||||
|
||||
if (painlessConstructor == null) {
|
||||
org.objectweb.asm.commons.Method asmConstructor = org.objectweb.asm.commons.Method.getMethod(javaConstructor);
|
||||
MethodHandle methodHandle;
|
||||
|
||||
try {
|
||||
|
@@ -359,17 +391,16 @@

            MethodType methodType = methodHandle.type();

            painlessConstructor = painlessMethodCache.computeIfAbsent(
                new PainlessMethodCacheKey(targetClass, CONSTRUCTOR_NAME, typeParameters),
                key -> new PainlessMethod(CONSTRUCTOR_NAME, targetClass, null, void.class, typeParameters,
                    asmConstructor, javaConstructor.getModifiers(), methodHandle, methodType)
            painlessConstructor = painlessConstuctorCache.computeIfAbsent(
                new PainlessConstructorCacheKey(targetClass, typeParameters),
                key -> new PainlessConstructor(javaConstructor, typeParameters, methodHandle, methodType)
            );

            painlessClassBuilder.constructors.put(painlessMethodKey, painlessConstructor);
        } else if (painlessConstructor.arguments.equals(typeParameters) == false){
            painlessClassBuilder.constructors.put(painlessConstructorKey, painlessConstructor);
        } else if (painlessConstructor.typeParameters.equals(typeParameters) == false){
            throw new IllegalArgumentException("cannot have constructors " +
                "[[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(typeParameters) + "] and " +
                "[[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(painlessConstructor.arguments) + "] " +
                "[[" + targetCanonicalClassName + "], " + typesToCanonicalTypeNames(painlessConstructor.typeParameters) + "] " +
                "with the same arity and different type parameters");
        }
    }
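The two hunks above swap the shared method cache for a dedicated constructor cache keyed on (target class, type parameters). A minimal standalone sketch of that pattern: an immutable value-style key feeding Map.computeIfAbsent so equal whitelist entries resolve to one shared object. The class and method names here are illustrative stand-ins, not the actual Painless types.

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

// Stand-in for PainlessConstructorCacheKey: immutable, with value-based
// equals/hashCode so it is safe to use as a HashMap key.
final class ConstructorCacheKey {
    private final Class<?> targetType;
    private final List<Class<?>> typeParameters;

    ConstructorCacheKey(Class<?> targetType, List<Class<?>> typeParameters) {
        this.targetType = targetType;
        this.typeParameters = Collections.unmodifiableList(new ArrayList<>(typeParameters));
    }

    @Override
    public boolean equals(Object object) {
        if (this == object) {
            return true;
        }
        if (object == null || getClass() != object.getClass()) {
            return false;
        }
        ConstructorCacheKey that = (ConstructorCacheKey) object;
        return Objects.equals(targetType, that.targetType)
            && Objects.equals(typeParameters, that.typeParameters);
    }

    @Override
    public int hashCode() {
        return Objects.hash(targetType, typeParameters);
    }
}

class ConstructorCache {
    private static final Map<ConstructorCacheKey, String> CACHE = new HashMap<>();

    // computeIfAbsent builds the value once per distinct key and then reuses it,
    // which is how the painless caches deduplicate across script contexts.
    static String resolve(Class<?> targetType, List<Class<?>> typeParameters) {
        return CACHE.computeIfAbsent(new ConstructorCacheKey(targetType, typeParameters),
            key -> targetType.getName() + typeParameters); // stand-in for building a constructor object
    }
}
```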
@@ -336,6 +336,13 @@ public final class PainlessLookupUtility {
               type == String.class;
    }

    /**
     * Constructs a painless constructor key used to lookup painless constructors from a painless class.
     */
    public static String buildPainlessConstructorKey(int constructorArity) {
        return CONSTRUCTOR_NAME + "/" + constructorArity;
    }

    /**
     * Constructs a painless method key used to lookup painless methods from a painless class.
     */
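The new key is nothing more than the constructor name plus arity; the old calls in this diff pass "<init>" explicitly, so the produced keys look like this (self-contained demonstration):

```java
public class ConstructorKeyDemo {
    static final String CONSTRUCTOR_NAME = "<init>"; // the value the old buildPainlessMethodKey calls used

    // Same shape as buildPainlessConstructorKey above: name + "/" + arity.
    static String buildPainlessConstructorKey(int constructorArity) {
        return CONSTRUCTOR_NAME + "/" + constructorArity;
    }

    public static void main(String[] args) {
        System.out.println(buildPainlessConstructorKey(0)); // prints "<init>/0"
        System.out.println(buildPainlessConstructorKey(2)); // prints "<init>/2"
    }
}
```

Arity alone disambiguates constructors within a class, which is why the key needs no parameter types.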
@@ -77,18 +77,18 @@ public final class ECapturingFunctionRef extends AExpression implements ILambda
            // static case
            if (captured.clazz != def.class) {
                try {
                    ref = new FunctionRef(locals.getPainlessLookup(), expected,
                    ref = FunctionRef.resolveFromLookup(locals.getPainlessLookup(), expected,
                        PainlessLookupUtility.typeToCanonicalTypeName(captured.clazz), call, 1);

                    // check casts between the interface method and the delegate method are legal
                    for (int i = 0; i < ref.interfaceMethod.arguments.size(); ++i) {
                        Class<?> from = ref.interfaceMethod.arguments.get(i);
                        Class<?> to = ref.delegateMethod.arguments.get(i);
                        Class<?> to = ref.delegateTypeParameters.get(i);
                        AnalyzerCaster.getLegalCast(location, from, to, false, true);
                    }

                    if (ref.interfaceMethod.rtn != void.class) {
                        AnalyzerCaster.getLegalCast(location, ref.delegateMethod.rtn, ref.interfaceMethod.rtn, false, true);
                        AnalyzerCaster.getLegalCast(location, ref.delegateReturnType, ref.interfaceMethod.rtn, false, true);
                    }
                } catch (IllegalArgumentException e) {
                    throw createError(e);
@@ -90,7 +90,7 @@ public final class EFunctionRef extends AExpression implements ILambda
            }
        } else {
            // whitelist lookup
            ref = new FunctionRef(locals.getPainlessLookup(), expected, type, call, 0);
            ref = FunctionRef.resolveFromLookup(locals.getPainlessLookup(), expected, type, call, 0);
        }

        } catch (IllegalArgumentException e) {
@@ -23,10 +23,12 @@ import org.elasticsearch.painless.Globals;
import org.elasticsearch.painless.Locals;
import org.elasticsearch.painless.Location;
import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.lookup.PainlessConstructor;
import org.elasticsearch.painless.lookup.PainlessLookupUtility;
import org.elasticsearch.painless.lookup.PainlessMethod;
import org.elasticsearch.painless.lookup.def;
import org.objectweb.asm.Type;
import org.objectweb.asm.commons.Method;

import java.util.ArrayList;
import java.util.List;

@@ -38,7 +40,7 @@ import java.util.Set;
public final class EListInit extends AExpression {
    private final List<AExpression> values;

    private PainlessMethod constructor = null;
    private PainlessConstructor constructor = null;
    private PainlessMethod method = null;

    public EListInit(Location location, List<AExpression> values) {
@@ -62,8 +64,8 @@ public final class EListInit extends AExpression {

        actual = ArrayList.class;

        constructor = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).constructors
            .get(PainlessLookupUtility.buildPainlessMethodKey("<init>", 0));
        constructor = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).constructors.get(
            PainlessLookupUtility.buildPainlessConstructorKey(0));

        if (constructor == null) {
            throw createError(new IllegalStateException("Illegal tree structure."));
@@ -92,7 +94,8 @@ public final class EListInit extends AExpression {

        writer.newInstance(MethodWriter.getType(actual));
        writer.dup();
        writer.invokeConstructor(Type.getType(constructor.target), constructor.method);
        writer.invokeConstructor(
                Type.getType(constructor.javaConstructor.getDeclaringClass()), Method.getMethod(constructor.javaConstructor));

        for (AExpression value : values) {
            writer.dup();
@@ -23,10 +23,12 @@ import org.elasticsearch.painless.Globals;
import org.elasticsearch.painless.Locals;
import org.elasticsearch.painless.Location;
import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.lookup.PainlessConstructor;
import org.elasticsearch.painless.lookup.PainlessLookupUtility;
import org.elasticsearch.painless.lookup.PainlessMethod;
import org.elasticsearch.painless.lookup.def;
import org.objectweb.asm.Type;
import org.objectweb.asm.commons.Method;

import java.util.HashMap;
import java.util.List;

@@ -39,7 +41,7 @@ public final class EMapInit extends AExpression {
    private final List<AExpression> keys;
    private final List<AExpression> values;

    private PainlessMethod constructor = null;
    private PainlessConstructor constructor = null;
    private PainlessMethod method = null;

    public EMapInit(Location location, List<AExpression> keys, List<AExpression> values) {
@@ -68,8 +70,8 @@ public final class EMapInit extends AExpression {

        actual = HashMap.class;

        constructor = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).constructors
            .get(PainlessLookupUtility.buildPainlessMethodKey("<init>", 0));
        constructor = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).constructors.get(
            PainlessLookupUtility.buildPainlessConstructorKey(0));

        if (constructor == null) {
            throw createError(new IllegalStateException("Illegal tree structure."));
@@ -111,7 +113,8 @@ public final class EMapInit extends AExpression {

        writer.newInstance(MethodWriter.getType(actual));
        writer.dup();
        writer.invokeConstructor(Type.getType(constructor.target), constructor.method);
        writer.invokeConstructor(
                Type.getType(constructor.javaConstructor.getDeclaringClass()), Method.getMethod(constructor.javaConstructor));

        for (int index = 0; index < keys.size(); ++index) {
            AExpression key = keys.get(index);
@@ -24,9 +24,10 @@ import org.elasticsearch.painless.Locals;
import org.elasticsearch.painless.Location;
import org.elasticsearch.painless.MethodWriter;
import org.elasticsearch.painless.lookup.PainlessClass;
import org.elasticsearch.painless.lookup.PainlessConstructor;
import org.elasticsearch.painless.lookup.PainlessLookupUtility;
import org.elasticsearch.painless.lookup.PainlessMethod;
import org.objectweb.asm.Type;
import org.objectweb.asm.commons.Method;

import java.util.List;
import java.util.Objects;

@@ -40,7 +41,7 @@ public final class ENewObj extends AExpression {
    private final String type;
    private final List<AExpression> arguments;

    private PainlessMethod constructor;
    private PainlessConstructor constructor;

    public ENewObj(Location location, String type, List<AExpression> arguments) {
        super(location);
@@ -65,16 +66,16 @@ public final class ENewObj extends AExpression {
        }

        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual);
        constructor = struct.constructors.get(PainlessLookupUtility.buildPainlessMethodKey("<init>", arguments.size()));
        constructor = struct.constructors.get(PainlessLookupUtility.buildPainlessConstructorKey(arguments.size()));

        if (constructor != null) {
            Class<?>[] types = new Class<?>[constructor.arguments.size()];
            constructor.arguments.toArray(types);
            Class<?>[] types = new Class<?>[constructor.typeParameters.size()];
            constructor.typeParameters.toArray(types);

            if (constructor.arguments.size() != arguments.size()) {
            if (constructor.typeParameters.size() != arguments.size()) {
                throw createError(new IllegalArgumentException(
                    "When calling constructor on type [" + PainlessLookupUtility.typeToCanonicalTypeName(actual) + "] " +
                    "expected [" + constructor.arguments.size() + "] arguments, but found [" + arguments.size() + "]."));
                    "expected [" + constructor.typeParameters.size() + "] arguments, but found [" + arguments.size() + "]."));
            }

            for (int argument = 0; argument < arguments.size(); ++argument) {
@@ -107,7 +108,8 @@ public final class ENewObj extends AExpression {
            argument.write(writer, globals);
        }

        writer.invokeConstructor(Type.getType(constructor.target), constructor.method);
        writer.invokeConstructor(
                Type.getType(constructor.javaConstructor.getDeclaringClass()), Method.getMethod(constructor.javaConstructor));
    }

    @Override
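EListInit, EMapInit, and ENewObj now all derive the ASM owner type and descriptor from the cached java.lang.reflect.Constructor instead of precomputed target/method fields. Below is a hedged standalone sketch of the same new/dup/invokespecial sequence, written against GeneratorAdapter (which Painless's MethodWriter extends):

```java
import java.lang.reflect.Constructor;
import java.util.ArrayList;

import org.objectweb.asm.Type;
import org.objectweb.asm.commons.GeneratorAdapter;
import org.objectweb.asm.commons.Method;

class NewInstanceEmitter {
    // Emits: NEW java/util/ArrayList; DUP; INVOKESPECIAL java/util/ArrayList.<init>()V
    static void emitNewArrayList(GeneratorAdapter writer) throws NoSuchMethodException {
        Constructor<?> javaConstructor = ArrayList.class.getConstructor();
        writer.newInstance(Type.getType(ArrayList.class));     // allocate the instance
        writer.dup();                                          // keep a reference on the stack
        writer.invokeConstructor(
            Type.getType(javaConstructor.getDeclaringClass()), // owner type from reflection,
            Method.getMethod(javaConstructor));                // descriptor from reflection, as in the diff
    }
}
```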
@@ -69,7 +69,7 @@ public class BaseClassTests extends ScriptTestCase {
    }

    public void testGets() {
        Compiler compiler = new Compiler(Gets.class, painlessLookup);
        Compiler compiler = new Compiler(Gets.class, null, null, painlessLookup);
        Map<String, Object> map = new HashMap<>();
        map.put("s", 1);

@@ -87,7 +87,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute();
    }
    public void testNoArgs() {
        Compiler compiler = new Compiler(NoArgs.class, painlessLookup);
        Compiler compiler = new Compiler(NoArgs.class, null, null, painlessLookup);
        assertEquals(1, ((NoArgs)scriptEngine.compile(compiler, null, "1", emptyMap())).execute());
        assertEquals("foo", ((NoArgs)scriptEngine.compile(compiler, null, "'foo'", emptyMap())).execute());

@@ -111,13 +111,13 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(Object arg);
    }
    public void testOneArg() {
        Compiler compiler = new Compiler(OneArg.class, painlessLookup);
        Compiler compiler = new Compiler(OneArg.class, null, null, painlessLookup);
        Object rando = randomInt();
        assertEquals(rando, ((OneArg)scriptEngine.compile(compiler, null, "arg", emptyMap())).execute(rando));
        rando = randomAlphaOfLength(5);
        assertEquals(rando, ((OneArg)scriptEngine.compile(compiler, null, "arg", emptyMap())).execute(rando));

        Compiler noargs = new Compiler(NoArgs.class, painlessLookup);
        Compiler noargs = new Compiler(NoArgs.class, null, null, painlessLookup);
        Exception e = expectScriptThrows(IllegalArgumentException.class, () ->
            scriptEngine.compile(noargs, null, "doc", emptyMap()));
        assertEquals("Variable [doc] is not defined.", e.getMessage());

@@ -132,7 +132,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(String[] arg);
    }
    public void testArrayArg() {
        Compiler compiler = new Compiler(ArrayArg.class, painlessLookup);
        Compiler compiler = new Compiler(ArrayArg.class, null, null, painlessLookup);
        String rando = randomAlphaOfLength(5);
        assertEquals(rando, ((ArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap())).execute(new String[] {rando, "foo"}));
    }

@@ -142,7 +142,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(int[] arg);
    }
    public void testPrimitiveArrayArg() {
        Compiler compiler = new Compiler(PrimitiveArrayArg.class, painlessLookup);
        Compiler compiler = new Compiler(PrimitiveArrayArg.class, null, null, painlessLookup);
        int rando = randomInt();
        assertEquals(rando, ((PrimitiveArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap())).execute(new int[] {rando, 10}));
    }

@@ -152,7 +152,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(Object[] arg);
    }
    public void testDefArrayArg() {
        Compiler compiler = new Compiler(DefArrayArg.class, painlessLookup);
        Compiler compiler = new Compiler(DefArrayArg.class, null, null, painlessLookup);
        Object rando = randomInt();
        assertEquals(rando, ((DefArrayArg)scriptEngine.compile(compiler, null, "arg[0]", emptyMap())).execute(new Object[] {rando, 10}));
        rando = randomAlphaOfLength(5);
@@ -170,7 +170,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract boolean needsD();
    }
    public void testManyArgs() {
        Compiler compiler = new Compiler(ManyArgs.class, painlessLookup);
        Compiler compiler = new Compiler(ManyArgs.class, null, null, painlessLookup);
        int rando = randomInt();
        assertEquals(rando, ((ManyArgs)scriptEngine.compile(compiler, null, "a", emptyMap())).execute(rando, 0, 0, 0));
        assertEquals(10, ((ManyArgs)scriptEngine.compile(compiler, null, "a + b + c + d", emptyMap())).execute(1, 2, 3, 4));

@@ -198,7 +198,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(String... arg);
    }
    public void testVararg() {
        Compiler compiler = new Compiler(VarargTest.class, painlessLookup);
        Compiler compiler = new Compiler(VarargTest.class, null, null, painlessLookup);
        assertEquals("foo bar baz", ((VarargTest)scriptEngine.compile(compiler, null, "String.join(' ', Arrays.asList(arg))", emptyMap()))
            .execute("foo", "bar", "baz"));
    }

@@ -214,7 +214,7 @@ public class BaseClassTests extends ScriptTestCase {
        }
    }
    public void testDefaultMethods() {
        Compiler compiler = new Compiler(DefaultMethods.class, painlessLookup);
        Compiler compiler = new Compiler(DefaultMethods.class, null, null, painlessLookup);
        int rando = randomInt();
        assertEquals(rando, ((DefaultMethods)scriptEngine.compile(compiler, null, "a", emptyMap())).execute(rando, 0, 0, 0));
        assertEquals(rando, ((DefaultMethods)scriptEngine.compile(compiler, null, "a", emptyMap())).executeWithASingleOne(rando, 0, 0));

@@ -228,7 +228,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract void execute(Map<String, Object> map);
    }
    public void testReturnsVoid() {
        Compiler compiler = new Compiler(ReturnsVoid.class, painlessLookup);
        Compiler compiler = new Compiler(ReturnsVoid.class, null, null, painlessLookup);
        Map<String, Object> map = new HashMap<>();
        ((ReturnsVoid)scriptEngine.compile(compiler, null, "map.a = 'foo'", emptyMap())).execute(map);
        assertEquals(singletonMap("a", "foo"), map);

@@ -247,7 +247,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract boolean execute();
    }
    public void testReturnsPrimitiveBoolean() {
        Compiler compiler = new Compiler(ReturnsPrimitiveBoolean.class, painlessLookup);
        Compiler compiler = new Compiler(ReturnsPrimitiveBoolean.class, null, null, painlessLookup);

        assertEquals(true, ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "true", emptyMap())).execute());
        assertEquals(false, ((ReturnsPrimitiveBoolean)scriptEngine.compile(compiler, null, "false", emptyMap())).execute());

@@ -289,7 +289,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract int execute();
    }
    public void testReturnsPrimitiveInt() {
        Compiler compiler = new Compiler(ReturnsPrimitiveInt.class, painlessLookup);
        Compiler compiler = new Compiler(ReturnsPrimitiveInt.class, null, null, painlessLookup);

        assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "1", emptyMap())).execute());
        assertEquals(1, ((ReturnsPrimitiveInt)scriptEngine.compile(compiler, null, "(int) 1L", emptyMap())).execute());

@@ -331,7 +331,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract float execute();
    }
    public void testReturnsPrimitiveFloat() {
        Compiler compiler = new Compiler(ReturnsPrimitiveFloat.class, painlessLookup);
        Compiler compiler = new Compiler(ReturnsPrimitiveFloat.class, null, null, painlessLookup);

        assertEquals(1.1f, ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "1.1f", emptyMap())).execute(), 0);
        assertEquals(1.1f, ((ReturnsPrimitiveFloat)scriptEngine.compile(compiler, null, "(float) 1.1d", emptyMap())).execute(), 0);

@@ -362,7 +362,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract double execute();
    }
    public void testReturnsPrimitiveDouble() {
        Compiler compiler = new Compiler(ReturnsPrimitiveDouble.class, painlessLookup);
        Compiler compiler = new Compiler(ReturnsPrimitiveDouble.class, null, null, painlessLookup);

        assertEquals(1.0, ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1", emptyMap())).execute(), 0);
        assertEquals(1.0, ((ReturnsPrimitiveDouble)scriptEngine.compile(compiler, null, "1L", emptyMap())).execute(), 0);

@@ -396,7 +396,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(String foo);
    }
    public void testNoArgumentsConstant() {
        Compiler compiler = new Compiler(NoArgumentsConstant.class, painlessLookup);
        Compiler compiler = new Compiler(NoArgumentsConstant.class, null, null, painlessLookup);
        Exception e = expectScriptThrows(IllegalArgumentException.class, false, () ->
            scriptEngine.compile(compiler, null, "1", emptyMap()));
        assertThat(e.getMessage(), startsWith(

@@ -409,7 +409,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(String foo);
    }
    public void testWrongArgumentsConstant() {
        Compiler compiler = new Compiler(WrongArgumentsConstant.class, painlessLookup);
        Compiler compiler = new Compiler(WrongArgumentsConstant.class, null, null, painlessLookup);
        Exception e = expectScriptThrows(IllegalArgumentException.class, false, () ->
            scriptEngine.compile(compiler, null, "1", emptyMap()));
        assertThat(e.getMessage(), startsWith(
@@ -422,7 +422,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(String foo);
    }
    public void testWrongLengthOfArgumentConstant() {
        Compiler compiler = new Compiler(WrongLengthOfArgumentConstant.class, painlessLookup);
        Compiler compiler = new Compiler(WrongLengthOfArgumentConstant.class, null, null, painlessLookup);
        Exception e = expectScriptThrows(IllegalArgumentException.class, false, () ->
            scriptEngine.compile(compiler, null, "1", emptyMap()));
        assertThat(e.getMessage(), startsWith("[" + WrongLengthOfArgumentConstant.class.getName() + "#ARGUMENTS] has length [2] but ["

@@ -434,7 +434,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(UnknownArgType foo);
    }
    public void testUnknownArgType() {
        Compiler compiler = new Compiler(UnknownArgType.class, painlessLookup);
        Compiler compiler = new Compiler(UnknownArgType.class, null, null, painlessLookup);
        Exception e = expectScriptThrows(IllegalArgumentException.class, false, () ->
            scriptEngine.compile(compiler, null, "1", emptyMap()));
        assertEquals("[foo] is of unknown type [" + UnknownArgType.class.getName() + ". Painless interfaces can only accept arguments "

@@ -446,7 +446,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract UnknownReturnType execute(String foo);
    }
    public void testUnknownReturnType() {
        Compiler compiler = new Compiler(UnknownReturnType.class, painlessLookup);
        Compiler compiler = new Compiler(UnknownReturnType.class, null, null, painlessLookup);
        Exception e = expectScriptThrows(IllegalArgumentException.class, false, () ->
            scriptEngine.compile(compiler, null, "1", emptyMap()));
        assertEquals("Painless can only implement execute methods returning a whitelisted type but [" + UnknownReturnType.class.getName()

@@ -458,7 +458,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(UnknownArgTypeInArray[] foo);
    }
    public void testUnknownArgTypeInArray() {
        Compiler compiler = new Compiler(UnknownArgTypeInArray.class, painlessLookup);
        Compiler compiler = new Compiler(UnknownArgTypeInArray.class, null, null, painlessLookup);
        Exception e = expectScriptThrows(IllegalArgumentException.class, false, () ->
            scriptEngine.compile(compiler, null, "1", emptyMap()));
        assertEquals("[foo] is of unknown type [" + UnknownArgTypeInArray.class.getName() + ". Painless interfaces can only accept "

@@ -470,7 +470,7 @@ public class BaseClassTests extends ScriptTestCase {
        public abstract Object execute(boolean foo);
    }
    public void testTwoExecuteMethods() {
        Compiler compiler = new Compiler(TwoExecuteMethods.class, painlessLookup);
        Compiler compiler = new Compiler(TwoExecuteMethods.class, null, null, painlessLookup);
        Exception e = expectScriptThrows(IllegalArgumentException.class, false, () ->
            scriptEngine.compile(compiler, null, "null", emptyMap()));
        assertEquals("Painless can only implement interfaces that have a single method named [execute] but ["
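Every one of these tests changes only the Compiler construction (two extra arguments, passed as null here; the diff does not show what the new slots mean, so they are treated as opaque). The script-interface contract the tests exercise is unchanged, roughly:

```java
// The shape of a Painless base class, reconstructed from the tests above:
// one abstract execute method, plus an optional static ARGUMENTS constant
// naming the parameters (its length must match the parameter count).
public abstract class OneArg {
    public static final String[] ARGUMENTS = new String[] {"arg"};

    public abstract Object execute(Object arg);
}
// Compiled and run as in the tests:
//   Compiler compiler = new Compiler(OneArg.class, null, null, painlessLookup);
//   ((OneArg) scriptEngine.compile(compiler, null, "arg", emptyMap())).execute(42); // -> 42
```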
@@ -40,7 +40,7 @@ final class Debugger {
        PrintWriter outputWriter = new PrintWriter(output);
        Textifier textifier = new Textifier();
        try {
            new Compiler(iface, PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS))
            new Compiler(iface, null, null, PainlessLookupBuilder.buildFromWhitelists(Whitelist.BASE_WHITELISTS))
                .compile("<debugging>", source, settings, textifier);
        } catch (RuntimeException e) {
            textifier.print(outputWriter);
@@ -24,6 +24,7 @@ import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.painless.lookup.PainlessClass;
import org.elasticsearch.painless.lookup.PainlessConstructor;
import org.elasticsearch.painless.lookup.PainlessField;
import org.elasticsearch.painless.lookup.PainlessLookup;
import org.elasticsearch.painless.lookup.PainlessLookupBuilder;

@@ -57,7 +58,8 @@ public class PainlessDocGenerator {
    private static final Logger logger = ESLoggerFactory.getLogger(PainlessDocGenerator.class);
    private static final Comparator<PainlessField> FIELD_NAME = comparing(f -> f.name);
    private static final Comparator<PainlessMethod> METHOD_NAME = comparing(m -> m.name);
    private static final Comparator<PainlessMethod> NUMBER_OF_ARGS = comparing(m -> m.arguments.size());
    private static final Comparator<PainlessMethod> METHOD_NUMBER_OF_PARAMS = comparing(m -> m.arguments.size());
    private static final Comparator<PainlessConstructor> CONSTRUCTOR_NUMBER_OF_PARAMS = comparing(m -> m.typeParameters.size());

    public static void main(String[] args) throws IOException {
        Path apiRootPath = PathUtils.get(args[0]);

@@ -103,12 +105,15 @@ public class PainlessDocGenerator {

                Consumer<PainlessField> documentField = field -> PainlessDocGenerator.documentField(typeStream, field);
                Consumer<PainlessMethod> documentMethod = method -> PainlessDocGenerator.documentMethod(typeStream, method);
                Consumer<PainlessConstructor> documentConstructor =
                    constructor -> PainlessDocGenerator.documentConstructor(typeStream, constructor);
                struct.staticFields.values().stream().sorted(FIELD_NAME).forEach(documentField);
                struct.fields.values().stream().sorted(FIELD_NAME).forEach(documentField);
                struct.staticMethods.values().stream().sorted(METHOD_NAME.thenComparing(NUMBER_OF_ARGS)).forEach(documentMethod);
                struct.constructors.values().stream().sorted(NUMBER_OF_ARGS).forEach(documentMethod);
                struct.staticMethods.values().stream().sorted(
                    METHOD_NAME.thenComparing(METHOD_NUMBER_OF_PARAMS)).forEach(documentMethod);
                struct.constructors.values().stream().sorted(CONSTRUCTOR_NUMBER_OF_PARAMS).forEach(documentConstructor);
                Map<String, Class<?>> inherited = new TreeMap<>();
                struct.methods.values().stream().sorted(METHOD_NAME.thenComparing(NUMBER_OF_ARGS)).forEach(method -> {
                struct.methods.values().stream().sorted(METHOD_NAME.thenComparing(METHOD_NUMBER_OF_PARAMS)).forEach(method -> {
                    if (method.target == clazz) {
                        documentMethod(typeStream, method);
                    } else {
@@ -164,6 +169,41 @@ public class PainlessDocGenerator {
        stream.println();
    }

    /**
     * Document a constructor.
     */
    private static void documentConstructor(PrintStream stream, PainlessConstructor constructor) {
        stream.print("* ++[[");
        emitAnchor(stream, constructor);
        stream.print("]]");

        String javadocRoot = javadocRoot(constructor.javaConstructor.getDeclaringClass());
        emitJavadocLink(stream, javadocRoot, constructor);
        stream.print('[');

        stream.print(constructorName(constructor));

        stream.print("](");
        boolean first = true;
        for (Class<?> arg : constructor.typeParameters) {
            if (first) {
                first = false;
            } else {
                stream.print(", ");
            }
            emitType(stream, arg);
        }
        stream.print(")++");

        if (javadocRoot.equals("java8")) {
            stream.print(" (");
            emitJavadocLink(stream, "java9", constructor);
            stream.print("[java 9])");
        }

        stream.println();
    }

    /**
     * Document a method.
     */
@@ -176,10 +216,8 @@ public class PainlessDocGenerator {
            stream.print("static ");
        }

        if (false == method.name.equals("<init>")) {
            emitType(stream, method.rtn);
            stream.print(' ');
        }
        emitType(stream, method.rtn);
        stream.print(' ');

        String javadocRoot = javadocRoot(method);
        emitJavadocLink(stream, javadocRoot, method);

@@ -216,6 +254,17 @@ public class PainlessDocGenerator {
        stream.print(PainlessLookupUtility.typeToCanonicalTypeName(clazz).replace('.', '-'));
    }

    /**
     * Anchor text for a {@link PainlessConstructor}.
     */
    private static void emitAnchor(PrintStream stream, PainlessConstructor constructor) {
        emitAnchor(stream, constructor.javaConstructor.getDeclaringClass());
        stream.print('-');
        stream.print(constructorName(constructor));
        stream.print('-');
        stream.print(constructor.typeParameters.size());
    }

    /**
     * Anchor text for a {@link PainlessMethod}.
     */
@@ -236,8 +285,12 @@ public class PainlessDocGenerator {
        stream.print(field.name);
    }

    private static String constructorName(PainlessConstructor constructor) {
        return PainlessLookupUtility.typeToCanonicalTypeName(constructor.javaConstructor.getDeclaringClass());
    }

    private static String methodName(PainlessMethod method) {
        return method.name.equals("<init>") ? PainlessLookupUtility.typeToCanonicalTypeName(method.target) : method.name;
        return PainlessLookupUtility.typeToCanonicalTypeName(method.target);
    }

    /**

@@ -269,6 +322,34 @@ public class PainlessDocGenerator {
        }
    }

    /**
     * Emit an external link to Javadoc for a {@link PainlessMethod}.
     *
     * @param root name of the root uri variable
     */
    private static void emitJavadocLink(PrintStream stream, String root, PainlessConstructor constructor) {
        stream.print("link:{");
        stream.print(root);
        stream.print("-javadoc}/");
        stream.print(classUrlPath(constructor.javaConstructor.getDeclaringClass()));
        stream.print(".html#");
        stream.print(constructorName(constructor));
        stream.print("%2D");
        boolean first = true;
        for (Class<?> clazz: constructor.typeParameters) {
            if (first) {
                first = false;
            } else {
                stream.print("%2D");
            }
            stream.print(clazz.getName());
            if (clazz.isArray()) {
                stream.print(":A");
            }
        }
        stream.print("%2D");
    }

    /**
     * Emit an external link to Javadoc for a {@link PainlessMethod}.
     *
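The constructor variant of emitJavadocLink reuses the java 8 anchor scheme: parameter class names joined by %2D (a URL-escaped '-'), with :A appended for array types. A hedged sketch that rebuilds just the anchor fragment (simple class name used here for brevity; the generator prints the Painless canonical type name):

```java
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Collection;

public class JavadocAnchorDemo {
    static String constructorAnchor(Constructor<?> ctor) {
        StringBuilder sb = new StringBuilder(ctor.getDeclaringClass().getSimpleName());
        sb.append("%2D"); // separator between the name and the first parameter
        boolean first = true;
        for (Class<?> clazz : ctor.getParameterTypes()) {
            if (first) {
                first = false;
            } else {
                sb.append("%2D");
            }
            sb.append(clazz.getName());
            if (clazz.isArray()) {
                sb.append(":A"); // arrays are marked :A in javadoc anchors
            }
        }
        return sb.append("%2D").toString();
    }

    public static void main(String[] args) throws NoSuchMethodException {
        // prints "ArrayList%2Djava.util.Collection%2D"
        System.out.println(constructorAnchor(ArrayList.class.getConstructor(Collection.class)));
    }
}
```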
@@ -27,13 +27,13 @@ import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.function.Supplier;

public class TransportDeleteByQueryAction extends HandledTransportAction<DeleteByQueryRequest, BulkByScrollResponse> {

@@ -46,7 +46,7 @@ public class TransportDeleteByQueryAction extends HandledTransportAction<DeleteB
    public TransportDeleteByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, Client client,
            TransportService transportService, ScriptService scriptService, ClusterService clusterService) {
        super(settings, DeleteByQueryAction.NAME, transportService, actionFilters,
            (Supplier<DeleteByQueryRequest>) DeleteByQueryRequest::new);
            (Writeable.Reader<DeleteByQueryRequest>) DeleteByQueryRequest::new);
        this.threadPool = threadPool;
        this.client = client;
        this.scriptService = scriptService;
@@ -39,6 +39,7 @@ import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.action.index.IndexRequest;

@@ -104,7 +105,7 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
    public TransportReindexAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
            IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService,
            AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) {
        super(settings, ReindexAction.NAME, transportService, actionFilters, ReindexRequest::new);
        super(settings, ReindexAction.NAME, transportService, actionFilters, (Writeable.Reader<ReindexRequest>)ReindexRequest::new);
        this.threadPool = threadPool;
        this.clusterService = clusterService;
        this.scriptService = scriptService;
@@ -29,6 +29,7 @@ import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.IdFieldMapper;

@@ -43,7 +44,6 @@ import org.elasticsearch.transport.TransportService;

import java.util.Map;
import java.util.function.BiFunction;
import java.util.function.Supplier;

public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateByQueryRequest, BulkByScrollResponse> {

@@ -56,7 +56,7 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
    public TransportUpdateByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, Client client,
            TransportService transportService, ScriptService scriptService, ClusterService clusterService) {
        super(settings, UpdateByQueryAction.NAME, transportService, actionFilters,
            (Supplier<UpdateByQueryRequest>) UpdateByQueryRequest::new);
            (Writeable.Reader<UpdateByQueryRequest>) UpdateByQueryRequest::new);
        this.threadPool = threadPool;
        this.client = client;
        this.scriptService = scriptService;
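All three transport actions above move the request factory from a Supplier<T> over the old Streamable protocol (default-construct, then readFrom) to Writeable.Reader<T>, where a constructor consumes the stream directly. A minimal sketch of the two shapes with stand-in types, not the actual org.elasticsearch stream classes:

```java
import java.io.DataInputStream;
import java.io.IOException;

// Stand-in request showing both deserialization styles side by side.
class MyRequest {
    private int slices;

    MyRequest() {
        // old Streamable style: callers do new MyRequest() and then readFrom(in)
    }

    MyRequest(DataInputStream in) throws IOException {
        // new Writeable.Reader style: the stream constructor reads every field,
        // so the method reference MyRequest::new satisfies Reader<MyRequest>
        this.slices = in.readInt();
    }

    void readFrom(DataInputStream in) throws IOException {
        this.slices = in.readInt();
    }
}

// Simplified shape of org.elasticsearch.common.io.stream.Writeable.Reader
// (the real interface reads from a StreamInput, not a DataInputStream).
@FunctionalInterface
interface Reader<T> {
    T read(DataInputStream in) throws IOException;
}

class ReaderDemo {
    static final Reader<MyRequest> READER = MyRequest::new; // the cast used in the constructors above
}
```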
@@ -67,19 +67,17 @@ public class RoundTripTests extends ESTestCase {
                new RemoteInfo(randomAlphaOfLength(5), randomAlphaOfLength(5), port, null,
                    query, username, password, headers, socketTimeout, connectTimeout));
        }
        ReindexRequest tripped = new ReindexRequest();
        roundTrip(reindex, tripped);
        ReindexRequest tripped = new ReindexRequest(toInputByteStream(reindex));
        assertRequestEquals(reindex, tripped);

        // Try slices=auto with a version that doesn't support it, which should fail
        reindex.setSlices(AbstractBulkByScrollRequest.AUTO_SLICES);
        Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_6_0_0_alpha1, reindex, null));
        Exception e = expectThrows(IllegalArgumentException.class, () -> toInputByteStream(Version.V_6_0_0_alpha1, reindex));
        assertEquals("Slices set as \"auto\" are not supported before version [6.1.0]. Found version [6.0.0-alpha1]", e.getMessage());

        // Try regular slices with a version that doesn't support slices=auto, which should succeed
        tripped = new ReindexRequest();
        reindex.setSlices(between(1, Integer.MAX_VALUE));
        roundTrip(Version.V_6_0_0_alpha1, reindex, tripped);
        tripped = new ReindexRequest(toInputByteStream(reindex));
        assertRequestEquals(Version.V_6_0_0_alpha1, reindex, tripped);
    }

@@ -89,20 +87,18 @@ public class RoundTripTests extends ESTestCase {
        if (randomBoolean()) {
            update.setPipeline(randomAlphaOfLength(5));
        }
        UpdateByQueryRequest tripped = new UpdateByQueryRequest();
        roundTrip(update, tripped);
        UpdateByQueryRequest tripped = new UpdateByQueryRequest(toInputByteStream(update));
        assertRequestEquals(update, tripped);
        assertEquals(update.getPipeline(), tripped.getPipeline());

        // Try slices=auto with a version that doesn't support it, which should fail
        update.setSlices(AbstractBulkByScrollRequest.AUTO_SLICES);
        Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_6_0_0_alpha1, update, null));
        Exception e = expectThrows(IllegalArgumentException.class, () -> toInputByteStream(Version.V_6_0_0_alpha1, update));
        assertEquals("Slices set as \"auto\" are not supported before version [6.1.0]. Found version [6.0.0-alpha1]", e.getMessage());

        // Try regular slices with a version that doesn't support slices=auto, which should succeed
        tripped = new UpdateByQueryRequest();
        update.setSlices(between(1, Integer.MAX_VALUE));
        roundTrip(Version.V_6_0_0_alpha1, update, tripped);
        tripped = new UpdateByQueryRequest(toInputByteStream(update));
        assertRequestEquals(update, tripped);
        assertEquals(update.getPipeline(), tripped.getPipeline());
    }
@@ -110,19 +106,17 @@ public class RoundTripTests extends ESTestCase {
    public void testDeleteByQueryRequest() throws IOException {
        DeleteByQueryRequest delete = new DeleteByQueryRequest(new SearchRequest());
        randomRequest(delete);
        DeleteByQueryRequest tripped = new DeleteByQueryRequest();
        roundTrip(delete, tripped);
        DeleteByQueryRequest tripped = new DeleteByQueryRequest(toInputByteStream(delete));
        assertRequestEquals(delete, tripped);

        // Try slices=auto with a version that doesn't support it, which should fail
        delete.setSlices(AbstractBulkByScrollRequest.AUTO_SLICES);
        Exception e = expectThrows(IllegalArgumentException.class, () -> roundTrip(Version.V_6_0_0_alpha1, delete, null));
        Exception e = expectThrows(IllegalArgumentException.class, () -> toInputByteStream(Version.V_6_0_0_alpha1, delete));
        assertEquals("Slices set as \"auto\" are not supported before version [6.1.0]. Found version [6.0.0-alpha1]", e.getMessage());

        // Try regular slices with a version that doesn't support slices=auto, which should succeed
        tripped = new DeleteByQueryRequest();
        delete.setSlices(between(1, Integer.MAX_VALUE));
        roundTrip(Version.V_6_0_0_alpha1, delete, tripped);
        tripped = new DeleteByQueryRequest(toInputByteStream(delete));
        assertRequestEquals(delete, tripped);
    }
@@ -198,23 +192,24 @@ public class RoundTripTests extends ESTestCase {
            request.setTaskId(new TaskId(randomAlphaOfLength(5), randomLong()));
        }
        RethrottleRequest tripped = new RethrottleRequest();
        roundTrip(request, tripped);
        // We use readFrom here because Rethrottle does not support the Writeable.Reader interface
        tripped.readFrom(toInputByteStream(request));
        assertEquals(request.getRequestsPerSecond(), tripped.getRequestsPerSecond(), 0.00001);
        assertArrayEquals(request.getActions(), tripped.getActions());
        assertEquals(request.getTaskId(), tripped.getTaskId());
    }

    private void roundTrip(Streamable example, Streamable empty) throws IOException {
        roundTrip(Version.CURRENT, example, empty);
    private StreamInput toInputByteStream(Streamable example) throws IOException {
        return toInputByteStream(Version.CURRENT, example);
    }

    private void roundTrip(Version version, Streamable example, Streamable empty) throws IOException {
    private StreamInput toInputByteStream(Version version, Streamable example) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(version);
        example.writeTo(out);
        StreamInput in = out.bytes().streamInput();
        in.setVersion(version);
        empty.readFrom(in);
        return in;
    }

    private Script randomScript() {
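The refactored helper returns the StreamInput rather than filling an empty Streamable, so request classes that have stream constructors can round-trip directly. The underlying idiom, sketched with java.io stand-ins for BytesStreamOutput/StreamInput:

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class RoundTripSketch {
    // Serialize one field, mirroring example.writeTo(out) above.
    static byte[] write(int slices) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeInt(slices);
        }
        return bytes.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        int original = 5;
        // Reading the bytes back plays the role of new Request(toInputByteStream(request)).
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(write(original)));
        int tripped = in.readInt();
        if (tripped != original) {
            throw new AssertionError("round trip changed the request"); // assertRequestEquals
        }
    }
}
```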
@@ -23,8 +23,9 @@ esplugin {
}

dependencies {
  compile 'com.microsoft.azure:azure-storage:5.0.0'
  compile 'com.microsoft.azure:azure-keyvault-core:0.8.0'
  compile 'com.microsoft.azure:azure-storage:8.0.0'
  compile 'com.microsoft.azure:azure-keyvault-core:1.0.0'
  compile 'com.google.guava:guava:20.0'
  compile 'org.apache.commons:commons-lang3:3.4'
}
@@ -40,6 +41,18 @@ thirdPartyAudit.excludes = [
  // Optional and not enabled by Elasticsearch
  'org.slf4j.Logger',
  'org.slf4j.LoggerFactory',
  // uses internal java api: sun.misc.Unsafe
  'com.google.common.cache.Striped64',
  'com.google.common.cache.Striped64$1',
  'com.google.common.cache.Striped64$Cell',
  'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1',
  'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2',
  'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3',
  'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper',
  'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1',
  'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray',
  'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
  'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
]

check {
@@ -1 +0,0 @@
35f7ac687462f491d0f8b0d96733dfe347493d70

@@ -0,0 +1 @@
e89dd5e621e21b753096ec6a03f203c01482c612

@@ -1 +0,0 @@
ba8f04bfeac08016c0f88423a202d0f3aac03aed

@@ -0,0 +1 @@
f6759c16ade4e2a05bc1dfbaf55161b9ed0e78b9

@@ -0,0 +1 @@
89507701249388e1ed5ddcf8c41f4ce1be7831ef
@@ -0,0 +1,202 @@

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@@ -61,7 +61,10 @@ import static java.util.Collections.emptyMap;
public class AzureStorageService extends AbstractComponent {

    public static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
    public static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(64, ByteSizeUnit.MB);
    /**
     * {@link com.microsoft.azure.storage.blob.BlobConstants#MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES}
     */
    public static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(256, ByteSizeUnit.MB);

    // 'package' for testing
    volatile Map<String, AzureStorageSettings> storageSettings = emptyMap();
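With the azure-storage upgrade above, the chunk-size ceiling rises from 64mb to the SDK's 256mb single-upload blob limit. Conceptually the chunk_size setting is validated against these two constants; a hedged sketch follows, since the real check lives in the ByteSizeValue setting parser (see the test messages below):

```java
public class ChunkSizeCheck {
    static final long MIN_CHUNK_SIZE_BYTES = 1L;                  // MIN_CHUNK_SIZE above
    static final long MAX_CHUNK_SIZE_BYTES = 256L * 1024 * 1024;  // MAX_CHUNK_SIZE, was 64mb before this change

    static long validate(long chunkSizeBytes) {
        if (chunkSizeBytes < MIN_CHUNK_SIZE_BYTES || chunkSizeBytes > MAX_CHUNK_SIZE_BYTES) {
            // mirrors: "failed to parse value [257mb] for setting [chunk_size], must be <= [256mb]"
            throw new IllegalArgumentException("chunk_size must be >= [1b] and <= [256mb]");
        }
        return chunkSizeBytes;
    }

    public static void main(String[] args) {
        validate(64L * 1024 * 1024);        // fine: within the new bound
        try {
            validate(257L * 1024 * 1024);   // rejected, as the test below asserts
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}
```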
@@ -104,7 +104,7 @@ public class AzureRepositorySettingsTests extends ESTestCase {
        assertEquals(AzureStorageService.MAX_CHUNK_SIZE, azureRepository.chunkSize());

        // chunk size in settings
        int size = randomIntBetween(1, 64);
        int size = randomIntBetween(1, 256);
        azureRepository = azureRepository(Settings.builder().put("chunk_size", size + "mb").build());
        assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), azureRepository.chunkSize());

@@ -120,8 +120,8 @@ public class AzureRepositorySettingsTests extends ESTestCase {

        // greater than max chunk size not allowed
        e = expectThrows(IllegalArgumentException.class, () ->
            azureRepository(Settings.builder().put("chunk_size", "65mb").build()));
        assertEquals("failed to parse value [65mb] for setting [chunk_size], must be <= [64mb]", e.getMessage());
            azureRepository(Settings.builder().put("chunk_size", "257mb").build()));
        assertEquals("failed to parse value [257mb] for setting [chunk_size], must be <= [256mb]", e.getMessage());
    }

}
@@ -87,7 +87,7 @@ public class EvilBootstrapChecksTests extends ESTestCase {
        final boolean enforceLimits = randomBoolean();
        final IllegalArgumentException e = expectThrows(
            IllegalArgumentException.class,
            () -> BootstrapChecks.check(new BootstrapContext(Settings.EMPTY, null), enforceLimits, emptyList(), "testInvalidValue"));
            () -> BootstrapChecks.check(new BootstrapContext(Settings.EMPTY, null), enforceLimits, emptyList()));
        final Matcher<String> matcher = containsString(
            "[es.enforce.bootstrap.checks] must be [true] but was [" + value + "]");
        assertThat(e, hasToString(matcher));
@@ -56,6 +56,7 @@ import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.lessThan;

@@ -319,9 +320,11 @@ public class EvilLoggerTests extends ESTestCase {
        assertThat(events.size(), equalTo(expectedLogLines + stackTraceLength));
        for (int i = 0; i < expectedLogLines; i++) {
            if (prefix == null) {
                assertThat(events.get(i), startsWith("test"));
                assertThat("Contents of [" + path + "] are wrong",
                    events.get(i), startsWith("[" + getTestName() + "] test"));
            } else {
                assertThat(events.get(i), startsWith("[" + prefix + "] test"));
                assertThat("Contents of [" + path + "] are wrong",
                    events.get(i), startsWith("[" + getTestName() + "][" + prefix + "] test"));
            }
        }
    }

@@ -357,6 +360,24 @@ public class EvilLoggerTests extends ESTestCase {
        }
    }

    public void testNoNodeNameWarning() throws IOException, UserException {
        setupLogging("no_node_name");

        final String path =
            System.getProperty("es.logs.base_path") +
            System.getProperty("file.separator") +
            System.getProperty("es.logs.cluster_name") + ".log";
        final List<String> events = Files.readAllLines(PathUtils.get(path));
        assertThat(events.size(), equalTo(2));
        final String location = "org.elasticsearch.common.logging.LogConfigurator";
        // the first message is a warning for unsupported configuration files
        assertLogLine(events.get(0), Level.WARN, location, "\\[null\\] Some logging configurations have %marker but don't "
            + "have %node_name. We will automatically add %node_name to the pattern to ease the migration for users "
            + "who customize log4j2.properties but will stop this behavior in 7.0. You should manually replace "
            + "`%node_name` with `\\[%node_name\\]%marker ` in these locations:");
        assertThat(events.get(1), endsWith("no_node_name/log4j2.properties"));
    }

    private void setupLogging(final String config) throws IOException, UserException {
        setupLogging(config, Settings.EMPTY);
    }
@ -1,13 +1,13 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%test_thread_info]%marker %m%n

appender.file.type = File
appender.file.name = file
appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.file.layout.type = PatternLayout
appender.file.layout.pattern = [%p][%l] %marker%m%n
appender.file.layout.pattern = [%p][%l] [%test_thread_info]%marker %m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console

@ -23,10 +23,9 @@ appender.deprecation_file.type = File
appender.deprecation_file.name = deprecation_file
appender.deprecation_file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
appender.deprecation_file.layout.type = PatternLayout
appender.deprecation_file.layout.pattern = [%p][%l] %marker%m%n
appender.deprecation_file.layout.pattern = [%p][%l] [%test_thread_info]%marker %m%n

logger.deprecation.name = deprecation
logger.deprecation.level = warn
logger.deprecation.appenderRef.deprecation_file.ref = deprecation_file
logger.deprecation.additivity = false

@ -1,7 +1,7 @@
appender.console3.type = Console
appender.console3.name = console3
appender.console3.layout.type = PatternLayout
appender.console3.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
appender.console3.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%test_thread_info]%marker %m%n

logger.third.name = third
logger.third.level = debug

@ -1,13 +1,13 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%test_thread_info]%marker %m%n

appender.file.type = File
appender.file.name = file
appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.file.layout.type = PatternLayout
appender.file.layout.pattern = [%p][%l] %marker%m%n
appender.file.layout.pattern = [%p][%l] [%test_thread_info]%marker %m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console

@ -17,7 +17,7 @@ appender.deprecation_file.type = File
appender.deprecation_file.name = deprecation_file
appender.deprecation_file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
appender.deprecation_file.layout.type = PatternLayout
appender.deprecation_file.layout.pattern = [%p][%l] %marker%m%n
appender.deprecation_file.layout.pattern = [%p][%l] [%test_thread_info]%marker %m%n

logger.deprecation.name = deprecation
logger.deprecation.level = warn

@ -1,7 +1,7 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%test_thread_info]%marker %m%n

logger.has_console_appender.name = has_console_appender
logger.has_console_appender.level = trace

@ -1,13 +1,13 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%test_thread_info]%marker %m%n

appender.file.type = File
appender.file.name = file
appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.file.layout.type = PatternLayout
appender.file.layout.pattern = [%p][%l] %marker%m%n
appender.file.layout.pattern = [%p][%l] [%test_thread_info]%marker %m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console

@ -0,0 +1,25 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n

appender.file.type = File
appender.file.name = file
appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.file.layout.type = PatternLayout
appender.file.layout.pattern = [%p][%l] %marker%m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.file.ref = file

appender.deprecation_file.type = File
appender.deprecation_file.name = deprecation_file
appender.deprecation_file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
appender.deprecation_file.layout.type = PatternLayout
appender.deprecation_file.layout.pattern = [%p][%l] %marker%m%n

logger.deprecation.name = deprecation
logger.deprecation.level = warn
logger.deprecation.appenderRef.deprecation_file.ref = deprecation_file
logger.deprecation.additivity = false

@ -1,13 +1,13 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%test_thread_info]%marker %m%n

appender.file.type = File
appender.file.name = file
appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.file.layout.type = PatternLayout
appender.file.layout.pattern = %marker%m%n
appender.file.layout.pattern = [%test_thread_info]%marker %m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console

@ -1,13 +1,13 @@
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%m%n
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%test_thread_info]%marker %m%n

appender.file.type = File
appender.file.name = file
appender.file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.file.layout.type = PatternLayout
appender.file.layout.pattern = [%p][%l] %marker%m%n
appender.file.layout.pattern = [%p][%l] [%test_thread_info]%marker %m%n

rootLogger.level = info
rootLogger.appenderRef.console.ref = console

@ -17,7 +17,7 @@ appender.deprecation_file.type = File
appender.deprecation_file.name = deprecation_file
appender.deprecation_file.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
appender.deprecation_file.layout.type = PatternLayout
appender.deprecation_file.layout.pattern = [%p][%l] %marker%m%n
appender.deprecation_file.layout.pattern = [%p][%l] [%test_thread_info]%marker %m%n

logger.deprecation.name = org.elasticsearch.deprecation.common.settings
logger.deprecation.level = warn

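Across all of these test configs the only change is prefixing the marker with the test thread info. Illustratively (a hypothetical event; the exact rendering of %test_thread_info depends on its converter), the old and new console patterns would format the same message roughly as:

[2018-07-31T10:15:30,123][INFO ][o.e.n.Node               ] starting ...
[2018-07-31T10:15:30,123][INFO ][o.e.n.Node               ] [[elasticsearch[node_s0]]] starting ...
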
@ -109,8 +109,9 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
                          final ActionListener<RolloverResponse> listener) {
        final MetaData metaData = state.metaData();
        validate(metaData, rolloverRequest);
        final AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(rolloverRequest.getAlias());
        final IndexMetaData indexMetaData = aliasOrIndex.getIndices().get(0);
        final AliasOrIndex.Alias alias = (AliasOrIndex.Alias) metaData.getAliasAndIndexLookup().get(rolloverRequest.getAlias());
        final IndexMetaData indexMetaData = alias.getWriteIndex();
        final boolean explicitWriteIndex = Boolean.TRUE.equals(indexMetaData.getAliases().get(alias.getAliasName()).writeIndex());
        final String sourceProvidedName = indexMetaData.getSettings().get(IndexMetaData.SETTING_INDEX_PROVIDED_NAME,
            indexMetaData.getIndex().getName());
        final String sourceIndexName = indexMetaData.getIndex().getName();

@ -138,10 +139,15 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
        CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(unresolvedName, rolloverIndexName,
            rolloverRequest);
        createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> {
            // switch the alias to point to the newly created index
            indexAliasesService.indicesAliases(
                prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName,
                    rolloverRequest),
            final IndicesAliasesClusterStateUpdateRequest aliasesUpdateRequest;
            if (explicitWriteIndex) {
                aliasesUpdateRequest = prepareRolloverAliasesWriteIndexUpdateRequest(sourceIndexName,
                    rolloverIndexName, rolloverRequest);
            } else {
                aliasesUpdateRequest = prepareRolloverAliasesUpdateRequest(sourceIndexName,
                    rolloverIndexName, rolloverRequest);
            }
            indexAliasesService.indicesAliases(aliasesUpdateRequest,
                ActionListener.wrap(aliasClusterStateUpdateResponse -> {
                    if (aliasClusterStateUpdateResponse.isAcknowledged()) {
                        clusterService.submitStateUpdateTask("update_rollover_info", new ClusterStateUpdateTask() {

@ -196,8 +202,19 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
    static IndicesAliasesClusterStateUpdateRequest prepareRolloverAliasesUpdateRequest(String oldIndex, String newIndex,
                                                                                       RolloverRequest request) {
        List<AliasAction> actions = unmodifiableList(Arrays.asList(
            new AliasAction.Add(newIndex, request.getAlias(), null, null, null, null),
            new AliasAction.Remove(oldIndex, request.getAlias())));
        final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(actions)
            .ackTimeout(request.ackTimeout())
            .masterNodeTimeout(request.masterNodeTimeout());
        return updateRequest;
    }

    static IndicesAliasesClusterStateUpdateRequest prepareRolloverAliasesWriteIndexUpdateRequest(String oldIndex, String newIndex,
                                                                                                 RolloverRequest request) {
        List<AliasAction> actions = unmodifiableList(Arrays.asList(
            new AliasAction.Add(newIndex, request.getAlias(), null, null, null, true),
            new AliasAction.Add(oldIndex, request.getAlias(), null, null, null, false)));
        final IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(actions)
            .ackTimeout(request.ackTimeout())
            .masterNodeTimeout(request.masterNodeTimeout());

@ -244,8 +261,9 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
        if (aliasOrIndex.isAlias() == false) {
            throw new IllegalArgumentException("source alias is a concrete index");
        }
        if (aliasOrIndex.getIndices().size() != 1) {
            throw new IllegalArgumentException("source alias maps to multiple indices");
        final AliasOrIndex.Alias alias = (AliasOrIndex.Alias) aliasOrIndex;
        if (alias.getWriteIndex() == null) {
            throw new IllegalArgumentException("source alias [" + alias.getAliasName() + "] does not point to a write index");
        }
    }

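A self-contained sketch of the two alias-update shapes built above (plain Java; `AliasAction` and the cluster-state request types are deliberately not modeled, the strings are illustration only): the classic path moves the alias wholesale to the new index, while the explicit-write-index path keeps the alias on both indices and only flips is_write_index.

import java.util.Arrays;
import java.util.List;

final class RolloverAliasSketch {
    // Classic rollover: the alias moves to the new index.
    static List<String> swap(String oldIndex, String newIndex, String alias) {
        return Arrays.asList(
            "add " + alias + " -> " + newIndex,
            "remove " + alias + " -> " + oldIndex);
    }

    // Explicit write index: both indices keep the alias; the flag flips.
    static List<String> flipWriteIndex(String oldIndex, String newIndex, String alias) {
        return Arrays.asList(
            "add " + alias + " -> " + newIndex + " (is_write_index=true)",
            "add " + alias + " -> " + oldIndex + " (is_write_index=false)");
    }

    public static void main(String[] args) {
        System.out.println(swap("logs-000001", "logs-000002", "logs"));
        System.out.println(flipWriteIndex("logs-000001", "logs-000002", "logs"));
    }
}
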
@ -37,6 +37,7 @@ import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.logging.NodeNamePatternConverter;
import org.elasticsearch.common.network.IfConfig;
import org.elasticsearch.common.settings.KeyStoreWrapper;
import org.elasticsearch.common.settings.SecureSettings;

@ -287,6 +288,10 @@ final class Bootstrap {

        final SecureSettings keystore = loadSecureSettings(initialEnv);
        final Environment environment = createEnvironment(pidFile, keystore, initialEnv.settings(), initialEnv.configFile());

        String nodeName = Node.NODE_NAME_SETTING.get(environment.settings());
        NodeNamePatternConverter.setNodeName(nodeName);

        try {
            LogConfigurator.configure(environment);
        } catch (IOException e) {

@ -317,8 +322,7 @@ final class Bootstrap {
        // install the default uncaught exception handler; must be done before security is
        // initialized as we do not want to grant the runtime permission
        // setDefaultUncaughtExceptionHandler
        Thread.setDefaultUncaughtExceptionHandler(
            new ElasticsearchUncaughtExceptionHandler(() -> Node.NODE_NAME_SETTING.get(environment.settings())));
        Thread.setDefaultUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler());

        INSTANCE.setup(true, environment);

@ -30,7 +30,6 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;

import java.io.BufferedReader;

@ -74,8 +73,7 @@ final class BootstrapChecks {
        combinedChecks.addAll(additionalChecks);
        check(context,
            enforceLimits(boundTransportAddress, DiscoveryModule.DISCOVERY_TYPE_SETTING.get(context.settings)),
            Collections.unmodifiableList(combinedChecks),
            Node.NODE_NAME_SETTING.get(context.settings));
            Collections.unmodifiableList(combinedChecks));
    }

    /**

@ -86,14 +84,12 @@ final class BootstrapChecks {
     * @param context       the current node bootstrap context
     * @param enforceLimits {@code true} if the checks should be enforced or otherwise warned
     * @param checks        the checks to execute
     * @param nodeName      the node name to be used as a logging prefix
     */
    static void check(
        final BootstrapContext context,
        final boolean enforceLimits,
        final List<BootstrapCheck> checks,
        final String nodeName) throws NodeValidationException {
        check(context, enforceLimits, checks, Loggers.getLogger(BootstrapChecks.class, nodeName));
        final List<BootstrapCheck> checks) throws NodeValidationException {
        check(context, enforceLimits, checks, Loggers.getLogger(BootstrapChecks.class));
    }

    /**

@ -27,16 +27,9 @@ import org.elasticsearch.common.logging.Loggers;
import java.io.IOError;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Objects;
import java.util.function.Supplier;

class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {

    private final Supplier<String> loggingPrefixSupplier;

    ElasticsearchUncaughtExceptionHandler(final Supplier<String> loggingPrefixSupplier) {
        this.loggingPrefixSupplier = Objects.requireNonNull(loggingPrefixSupplier);
    }
    private static final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class);

    @Override
    public void uncaughtException(Thread t, Throwable e) {

@ -70,12 +63,10 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
    }

    void onFatalUncaught(final String threadName, final Throwable t) {
        final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
        logger.error(() -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
    }

    void onNonFatalUncaught(final String threadName, final Throwable t) {
        final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
        logger.warn(() -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
    }

@ -46,6 +46,8 @@ import java.util.Locale;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.apache.lucene.geo.GeoUtils.orient;

/**
 * The {@link PolygonBuilder} implements the groundwork to create polygons. This contains
 * Methods to wrap polygons at the dateline and building shapes from the data held by the

@ -642,14 +644,8 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, PolygonBuilder> {
     */
    private static Edge[] ring(int component, boolean direction, boolean handedness,
            Coordinate[] points, int offset, Edge[] edges, int toffset, int length, final AtomicBoolean translated) {
        // calculate the direction of the points:
        // find the point at the top of the set and check its
        // neighbors orientation. So direction is equivalent
        // to clockwise/counterclockwise
        final int top = top(points, offset, length);
        final int prev = (offset + ((top + length - 1) % length));
        final int next = (offset + ((top + 1) % length));
        boolean orientation = points[offset + prev].x > points[offset + next].x;

        boolean orientation = getOrientation(points, offset, length);

        // OGC requires shell as ccw (Right-Handedness) and holes as cw (Left-Handedness)
        // since GeoJSON doesn't specify (and doesn't need to) GEO core will assume OGC standards

@ -678,6 +674,35 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, PolygonBuilder> {
        return concat(component, direction ^ orientation, points, offset, edges, toffset, length);
    }

    /**
     * @return whether the points are clockwise (true) or anticlockwise (false)
     */
    private static boolean getOrientation(Coordinate[] points, int offset, int length) {
        // calculate the direction of the points: find the southernmost point
        // and check its neighbors orientation.

        final int top = top(points, offset, length);
        final int prev = (top + length - 1) % length;
        final int next = (top + 1) % length;

        final int determinantSign = orient(
            points[offset + prev].x, points[offset + prev].y,
            points[offset + top].x, points[offset + top].y,
            points[offset + next].x, points[offset + next].y);

        if (determinantSign == 0) {
            // the points are collinear; `top` cannot lie strictly between its neighbours, so the edges either side of `top` coincide
            throw new InvalidShapeException("Cannot determine orientation: edges adjacent to ("
                + points[offset + top].x + "," + points[offset + top].y + ") coincide");
        }

        return determinantSign < 0;
    }

    /**
     * @return the (offset) index of the point that is furthest west amongst
     * those points that are the furthest south in the set.
     */
    private static int top(Coordinate[] points, int offset, int length) {
        int top = 0; // we start at 1 here since top points to 0
        for (int i = 1; i < length; i++) {

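For reference, the determinant test above is the standard 2D cross-product orientation check. A minimal standalone sketch (plain Java, no Lucene dependency; this `orient` is a simplified stand-in for the exact-arithmetic GeoUtils.orient used in the diff):

final class OrientationSketch {
    /**
     * Sign of the cross product of (b - a) and (c - b):
     * > 0 counter-clockwise, < 0 clockwise, 0 collinear.
     */
    static int orient(double ax, double ay, double bx, double by, double cx, double cy) {
        double cross = (bx - ax) * (cy - by) - (by - ay) * (cx - bx);
        return cross > 0 ? 1 : cross < 0 ? -1 : 0;
    }

    public static void main(String[] args) {
        System.out.println(orient(0, 0, 1, 0, 1, 1)); // triangle visited counter-clockwise -> 1
        System.out.println(orient(1, 1, 1, 0, 0, 0)); // same triangle visited clockwise -> -1
        System.out.println(orient(0, 0, 1, 1, 2, 2)); // collinear points -> 0
    }
}
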
@ -46,6 +46,15 @@ public final class ESLoggerFactory {
    }

    public static Logger getLogger(String prefix, Logger logger) {
        /*
         * In a followup we'll throw an exception if prefix is null or empty
         * redirecting folks to LogManager.getLogger.
         *
         * This and more is tracked in https://github.com/elastic/elasticsearch/issues/32174
         */
        if (prefix == null || prefix.length() == 0) {
            return logger;
        }
        return new PrefixLogger((ExtendedLogger)logger, logger.getName(), prefix);
    }

@ -23,12 +23,16 @@ import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.AbstractConfiguration;
import org.apache.logging.log4j.core.config.ConfigurationException;
import org.apache.logging.log4j.core.config.ConfigurationSource;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
import org.apache.logging.log4j.core.config.composite.CompositeConfiguration;
import org.apache.logging.log4j.core.config.plugins.util.PluginManager;
import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationBuilder;
import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory;
import org.apache.logging.log4j.status.StatusConsoleListener;
import org.apache.logging.log4j.status.StatusData;

@ -43,6 +47,7 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.FileVisitOption;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;

@ -50,9 +55,12 @@ import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.StreamSupport;

@ -119,6 +127,13 @@ public class LogConfigurator {
        configure(environment.settings(), environment.configFile(), environment.logsFile());
    }

    /**
     * Load logging plugins so we can have {@code node_name} in the pattern.
     */
    public static void loadLog4jPlugins() {
        PluginManager.addPackage(LogConfigurator.class.getPackage().getName());
    }

    private static void checkErrorListener() {
        assert errorListenerIsRegistered() : "expected error listener to be registered";
        if (error.get()) {

@ -135,6 +150,8 @@ public class LogConfigurator {
        Objects.requireNonNull(configsPath);
        Objects.requireNonNull(logsPath);

        loadLog4jPlugins();

        setLogConfigurationSystemProperty(logsPath, settings);
        // we initialize the status logger immediately otherwise Log4j will complain when we try to get the context
        configureStatusLogger();

@ -142,7 +159,53 @@
        final LoggerContext context = (LoggerContext) LogManager.getContext(false);

        final List<AbstractConfiguration> configurations = new ArrayList<>();
        final PropertiesConfigurationFactory factory = new PropertiesConfigurationFactory();

        /*
         * Subclass the properties configurator to hack the new pattern in
         * place so users don't have to change log4j2.properties in
         * a minor release. In 7.0 we'll remove this and force users to
         * change log4j2.properties. If they don't customize log4j2.properties
         * then they won't have to do anything anyway.
         *
         * Everything in this subclass that isn't marked as a hack is copied
         * from log4j2's source.
         */
        Set<String> locationsWithDeprecatedPatterns = Collections.synchronizedSet(new HashSet<>());
        final PropertiesConfigurationFactory factory = new PropertiesConfigurationFactory() {
            @Override
            public PropertiesConfiguration getConfiguration(final LoggerContext loggerContext, final ConfigurationSource source) {
                final Properties properties = new Properties();
                try (InputStream configStream = source.getInputStream()) {
                    properties.load(configStream);
                } catch (final IOException ioe) {
                    throw new ConfigurationException("Unable to load " + source.toString(), ioe);
                }
                // Hack the new pattern into place
                for (String name : properties.stringPropertyNames()) {
                    if (false == name.endsWith(".pattern")) continue;
                    // Null is weird here but we can't do anything with it so ignore it
                    String value = properties.getProperty(name);
                    if (value == null) continue;
                    // Tests don't need to be changed
                    if (value.contains("%test_thread_info")) continue;
                    /*
                     * Patterns without a marker are sufficiently customized
                     * that we don't have an opinion about them.
                     */
                    if (false == value.contains("%marker")) continue;
                    if (false == value.contains("%node_name")) {
                        locationsWithDeprecatedPatterns.add(source.getLocation());
                        properties.setProperty(name, value.replace("%marker", "[%node_name]%marker "));
                    }
                }
                // end hack
                return new PropertiesConfigurationBuilder()
                    .setConfigurationSource(source)
                    .setRootProperties(properties)
                    .setLoggerContext(loggerContext)
                    .build();
            }
        };
        final Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
        Files.walkFileTree(configsPath, options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
            @Override

@ -163,6 +226,14 @@ public class LogConfigurator {
        context.start(new CompositeConfiguration(configurations));

        configureLoggerLevels(settings);

        final String deprecatedLocationsString = String.join("\n ", locationsWithDeprecatedPatterns);
        if (deprecatedLocationsString.length() > 0) {
            LogManager.getLogger(LogConfigurator.class).warn("Some logging configurations have %marker but don't have %node_name. "
                + "We will automatically add %node_name to the pattern to ease the migration for users who customize "
                + "log4j2.properties but will stop this behavior in 7.0. You should manually replace `%node_name` with "
                + "`[%node_name]%marker ` in these locations:\n {}", deprecatedLocationsString);
        }
    }

    private static void configureStatusLogger() {

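A distilled, standalone version of the pattern-rewrite hack above may help when reviewing it (same rewrite rules, but the class name, properties content, and standalone framing are made up for the demo; the real code does this inside a PropertiesConfigurationFactory subclass):

import java.util.Properties;

final class PatternMigrationSketch {
    /** Apply the migration rules: only *.pattern keys with %marker but no %node_name are rewritten. */
    static boolean rewrite(Properties properties) {
        boolean changed = false;
        for (String name : properties.stringPropertyNames()) {
            if (name.endsWith(".pattern") == false) continue;
            String value = properties.getProperty(name);
            if (value == null || value.contains("%test_thread_info")) continue;
            if (value.contains("%marker") == false || value.contains("%node_name")) continue;
            properties.setProperty(name, value.replace("%marker", "[%node_name]%marker "));
            changed = true;
        }
        return changed;
    }

    public static void main(String[] args) {
        Properties p = new Properties();
        p.setProperty("appender.console.layout.pattern", "[%d{ISO8601}][%-5p][%-25c] %marker%m%n");
        rewrite(p);
        // prints: [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %m%n
        System.out.println(p.getProperty("appender.console.layout.pattern"));
    }
}
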
@ -31,13 +31,9 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.Node;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import static java.util.Arrays.asList;
import static org.elasticsearch.common.util.CollectionUtils.asArrayList;

/**

@ -71,29 +67,19 @@ public class Loggers {
    }

    public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
        final List<String> prefixesList = prefixesList(settings, prefixes);
        return Loggers.getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
        return Loggers.getLogger(clazz, prefixes);
    }

    public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
        final List<String> prefixesList = prefixesList(settings, prefixes);
        return Loggers.getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
    }

    private static List<String> prefixesList(Settings settings, String... prefixes) {
        List<String> prefixesList = new ArrayList<>();
        if (Node.NODE_NAME_SETTING.exists(settings)) {
            prefixesList.add(Node.NODE_NAME_SETTING.get(settings));
        }
        if (prefixes != null && prefixes.length > 0) {
            prefixesList.addAll(asList(prefixes));
        }
        return prefixesList;
        return Loggers.getLogger(loggerName, prefixes);
    }

    public static Logger getLogger(Logger parentLogger, String s) {
        assert parentLogger instanceof PrefixLogger;
        return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
        String prefix = null;
        if (parentLogger instanceof PrefixLogger) {
            prefix = ((PrefixLogger)parentLogger).prefix();
        }
        return ESLoggerFactory.getLogger(prefix, parentLogger.getName() + s);
    }

    public static Logger getLogger(String s) {

@ -126,7 +112,6 @@ public class Loggers {
            }
        }
        if (sb.length() > 0) {
            sb.append(" ");
            prefix = sb.toString();
        }

@ -0,0 +1,72 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import java.util.Arrays;

import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.config.plugins.Plugin;
import org.apache.logging.log4j.core.pattern.ConverterKeys;
import org.apache.logging.log4j.core.pattern.LogEventPatternConverter;
import org.apache.logging.log4j.core.pattern.PatternConverter;
import org.apache.lucene.util.SetOnce;

/**
 * Converts {@code %node_name} in log4j patterns into the current node name.
 * We *could* use a system property lookup instead but this is very explicit
 * and fails fast if we try to use the logger without initializing the node
 * name. As a bonus it ought to be ever so slightly faster because it doesn't
 * have to look up the system property every time.
 */
@Plugin(category = PatternConverter.CATEGORY, name = "NodeNamePatternConverter")
@ConverterKeys({"node_name"})
public class NodeNamePatternConverter extends LogEventPatternConverter {
    private static final SetOnce<String> NODE_NAME = new SetOnce<>();

    /**
     * Set the name of this node.
     */
    public static void setNodeName(String nodeName) {
        NODE_NAME.set(nodeName);
    }

    /**
     * Called by log4j2 to initialize this converter.
     */
    public static NodeNamePatternConverter newInstance(final String[] options) {
        if (options.length > 0) {
            throw new IllegalArgumentException("no options supported but options provided: "
                + Arrays.toString(options));
        }
        return new NodeNamePatternConverter(NODE_NAME.get());
    }

    private final String nodeName;

    private NodeNamePatternConverter(String nodeName) {
        super("NodeName", "node_name");
        this.nodeName = nodeName;
    }

    @Override
    public void format(LogEvent event, StringBuilder toAppendTo) {
        toAppendTo.append(nodeName);
    }
}

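The fail-fast behavior the javadoc mentions leans on Lucene's SetOnce. A minimal sketch of its contract, using the real org.apache.lucene.util.SetOnce API (requires lucene-core on the classpath):

import org.apache.lucene.util.SetOnce;

final class SetOnceSketch {
    public static void main(String[] args) {
        SetOnce<String> nodeName = new SetOnce<>();
        System.out.println(nodeName.get()); // null until the node name is set
        nodeName.set("node-0");
        System.out.println(nodeName.get()); // node-0
        try {
            nodeName.set("node-1");         // a second set is rejected
        } catch (SetOnce.AlreadySetException e) {
            System.out.println("already set");
        }
    }
}
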
@ -359,7 +359,7 @@ public class KeyStoreWrapper implements SecureSettings {
            if (input.read() != -1) {
                throw new SecurityException("Keystore has been corrupted or tampered with");
            }
        } catch (EOFException e) {
        } catch (IOException e) {
            throw new SecurityException("Keystore has been corrupted or tampered with", e);
        }
    }

@ -20,6 +20,7 @@

package org.elasticsearch.index.get;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.document.DocumentField;

@ -30,6 +31,7 @@ import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.IgnoredFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.search.lookup.SourceLookup;

@ -225,10 +227,13 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
                }
            }
        }

        for (DocumentField field : metaFields) {
            Object value = field.getValue();
            builder.field(field.getName(), value);
            // TODO: can we avoid having an exception here?
            if (field.getName().equals(IgnoredFieldMapper.NAME)) {
                builder.field(field.getName(), field.getValues());
            } else {
                builder.field(field.getName(), field.<Object>getValue());
            }
        }

        builder.field(FOUND, exists);

@ -316,7 +321,11 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
                    parser.skipChildren(); // skip potential inner objects for forward compatibility
                }
            } else if (token == XContentParser.Token.START_ARRAY) {
                parser.skipChildren(); // skip potential inner arrays for forward compatibility
                if (IgnoredFieldMapper.NAME.equals(currentFieldName)) {
                    fields.put(currentFieldName, new DocumentField(currentFieldName, parser.list()));
                } else {
                    parser.skipChildren(); // skip potential inner arrays for forward compatibility
                }
            }
        }
        return new GetResult(index, type, id, version, found, source, fields);

@ -400,7 +409,12 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten

    @Override
    public int hashCode() {
        return Objects.hash(index, type, id, version, exists, fields, sourceAsMap());
        return Objects.hash(version, exists, index, type, id, fields, sourceAsMap());
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }
}

@ -23,8 +23,11 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.tasks.TaskId;

import java.io.IOException;

import static org.elasticsearch.action.ValidateActions.addValidationError;

/**

@ -53,6 +56,10 @@ public class DeleteByQueryRequest extends AbstractBulkByScrollRequest<DeleteByQu
        this(search, true);
    }

    public DeleteByQueryRequest(StreamInput in) throws IOException {
        super.readFrom(in);
    }

    private DeleteByQueryRequest(SearchRequest search, boolean setDefaults) {
        super(search, setDefaults);
        // Delete-By-Query does not require the source

@ -59,6 +59,13 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequ
        this.destination = destination;
    }

    public ReindexRequest(StreamInput in) throws IOException {
        super.readFrom(in);
        destination = new IndexRequest();
        destination.readFrom(in);
        remoteInfo = in.readOptionalWriteable(RemoteInfo::new);
    }

    @Override
    protected ReindexRequest self() {
        return this;

@ -135,10 +142,7 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequ

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        destination = new IndexRequest();
        destination.readFrom(in);
        remoteInfo = in.readOptionalWriteable(RemoteInfo::new);
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override

@ -47,6 +47,11 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest<Updat
        this(search, true);
    }

    public UpdateByQueryRequest(StreamInput in) throws IOException {
        super.readFrom(in);
        pipeline = in.readOptionalString();
    }

    private UpdateByQueryRequest(SearchRequest search, boolean setDefaults) {
        super(search, setDefaults);
    }

@ -108,8 +113,7 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest<Updat

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        pipeline = in.readOptionalString();
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override

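The three request classes above follow the same migration: wire deserialization moves from a mutable readFrom(StreamInput) method (Streamable) to a constructor that takes the stream (the Writeable convention). A minimal standalone sketch of the idea, with a hypothetical Message type and the JDK's data streams standing in for Elasticsearch's StreamInput/StreamOutput:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

final class Message {
    final String text;

    Message(String text) {
        this.text = text;
    }

    // Writeable-style: deserialize in a constructor, so fields can be final.
    Message(DataInput in) throws IOException {
        this.text = in.readUTF();
    }

    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(text);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new Message("hello").writeTo(new DataOutputStream(bytes));
        Message copy = new Message(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.text); // hello
    }
}
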
@ -420,6 +420,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
                // state may result in a new shard being initialized while having the same allocation id as the currently started shard.
                logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
                indexService.removeShard(shardId.id(), "removing shard (stale copy)");
            } else if (newShardRouting.primary() && currentRoutingEntry.primary() == false && newShardRouting.initializing()) {
                assert currentRoutingEntry.initializing() : currentRoutingEntry; // see above if clause
                // this can happen when cluster state batching batches activation of the shard, closing an index, reopening it
                // and assigning an initializing primary to this node
                logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
                indexService.removeShard(shardId.id(), "removing shard (stale copy)");
            }
        }
    }

@ -527,8 +527,7 @@ public final class IngestDocument {

    private static void appendValues(List<Object> list, Object value) {
        if (value instanceof List) {
            List<?> valueList = (List<?>) value;
            valueList.stream().forEach(list::add);
            list.addAll((List<?>) value);
        } else {
            list.add(value);
        }

@ -231,7 +231,14 @@ public class Node implements Closeable {
    }

    private static final String CLIENT_TYPE = "node";

    private final Lifecycle lifecycle = new Lifecycle();

    /**
     * Logger initialized in the ctor because if it were initialized statically
     * then it wouldn't get the node name.
     */
    private final Logger logger;
    private final Injector injector;
    private final Settings settings;
    private final Settings originalSettings;

@ -257,13 +264,9 @@ public class Node implements Closeable {
    }

    protected Node(final Environment environment, Collection<Class<? extends Plugin>> classpathPlugins) {
        logger = Loggers.getLogger(Node.class);
        final List<Closeable> resourcesToClose = new ArrayList<>(); // register everything we need to release in the case of an error
        boolean success = false;
        {
            // use temp logger just to say we are starting. we can't use it later on because the node name might not be set
            Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(environment.settings()));
            logger.info("initializing ...");
        }
        try {
            originalSettings = environment.settings();
            Settings tmpSettings = Settings.builder().put(environment.settings())

@ -279,7 +282,6 @@ public class Node implements Closeable {
            final boolean hadPredefinedNodeName = NODE_NAME_SETTING.exists(tmpSettings);
            final String nodeId = nodeEnvironment.nodeId();
            tmpSettings = addNodeNameIfNeeded(tmpSettings, nodeId);
            final Logger logger = Loggers.getLogger(Node.class, tmpSettings);
            // this must be captured after the node name is possibly added to the settings
            final String nodeName = NODE_NAME_SETTING.get(tmpSettings);
            if (hadPredefinedNodeName == false) {

@ -631,7 +633,6 @@ public class Node implements Closeable {
            return this;
        }

        Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
        logger.info("starting ...");
        pluginLifecycleComponents.forEach(LifecycleComponent::start);

@ -742,7 +743,6 @@ public class Node implements Closeable {
        if (!lifecycle.moveToStopped()) {
            return this;
        }
        Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
        logger.info("stopping ...");

        injector.getInstance(ResourceWatcherService.class).stop();

@ -785,7 +785,6 @@ public class Node implements Closeable {
            return;
        }

        Logger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(settings));
        logger.info("closing ...");
        List<Closeable> toClose = new ArrayList<>();
        StopWatch stopWatch = new StopWatch("node_close");

@ -19,6 +19,16 @@

package org.elasticsearch.search;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import org.apache.lucene.search.Explanation;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.OriginalIndices;

@ -51,16 +61,6 @@ import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.elasticsearch.search.lookup.SourceLookup;
import org.elasticsearch.transport.RemoteClusterAware;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static java.util.Collections.unmodifiableMap;

@ -107,6 +107,9 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
    @Nullable
    private SearchShardTarget shard;

    //These two fields normally get set when setting the shard target, so they hold the same values as the target thus don't get
    //serialized over the wire. When parsing hits back from xcontent though, in most of the cases (whenever explanation is disabled)
    //we can't rebuild the shard target object so we need to set these manually for users retrieval.
    private transient String index;
    private transient String clusterAlias;

@ -551,7 +554,26 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
        Map<String, DocumentField> fields = get(Fields.FIELDS, values, Collections.emptyMap());

        SearchHit searchHit = new SearchHit(-1, id, type, nestedIdentity, fields);
        searchHit.index = get(Fields._INDEX, values, null);
        String index = get(Fields._INDEX, values, null);
        String clusterAlias = null;
        if (index != null) {
            int indexOf = index.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR);
            if (indexOf > 0) {
                clusterAlias = index.substring(0, indexOf);
                index = index.substring(indexOf + 1);
            }
        }
        ShardId shardId = get(Fields._SHARD, values, null);
        String nodeId = get(Fields._NODE, values, null);
        if (shardId != null && nodeId != null) {
            assert shardId.getIndexName().equals(index);
            searchHit.shard(new SearchShardTarget(nodeId, shardId, clusterAlias, OriginalIndices.NONE));
        } else {
            //these fields get set anyways when setting the shard target,
            //but we set them explicitly when we don't have enough info to rebuild the shard target
            searchHit.index = index;
            searchHit.clusterAlias = clusterAlias;
        }
        searchHit.score(get(Fields._SCORE, values, DEFAULT_SCORE));
        searchHit.version(get(Fields._VERSION, values, -1L));
        searchHit.sortValues(get(Fields.SORT, values, SearchSortValues.EMPTY));

@ -561,12 +583,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
        searchHit.setInnerHits(get(Fields.INNER_HITS, values, null));
        List<String> matchedQueries = get(Fields.MATCHED_QUERIES, values, null);
        if (matchedQueries != null) {
            searchHit.matchedQueries(matchedQueries.toArray(new String[matchedQueries.size()]));
        }
        ShardId shardId = get(Fields._SHARD, values, null);
        String nodeId = get(Fields._NODE, values, null);
        if (shardId != null && nodeId != null) {
            searchHit.shard(new SearchShardTarget(nodeId, shardId, null, OriginalIndices.NONE));
            searchHit.matchedQueries(matchedQueries.toArray(new String[0]));
        }
        return searchHit;
    }

@ -602,16 +619,24 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
        for (String metadatafield : MapperService.getAllMetaFields()) {
            if (metadatafield.equals(Fields._ID) == false && metadatafield.equals(Fields._INDEX) == false
                    && metadatafield.equals(Fields._TYPE) == false) {
                parser.declareField((map, field) -> {
                    @SuppressWarnings("unchecked")
                    Map<String, DocumentField> fieldMap = (Map<String, DocumentField>) map.computeIfAbsent(Fields.FIELDS,
                        v -> new HashMap<String, DocumentField>());
                    fieldMap.put(field.getName(), field);
                }, (p, c) -> {
                    List<Object> values = new ArrayList<>();
                    values.add(parseFieldsValue(p));
                    return new DocumentField(metadatafield, values);
                }, new ParseField(metadatafield), ValueType.VALUE);
                if (metadatafield.equals(IgnoredFieldMapper.NAME)) {
                    parser.declareObjectArray((map, list) -> {
                        @SuppressWarnings("unchecked")
                        Map<String, DocumentField> fieldMap = (Map<String, DocumentField>) map.computeIfAbsent(Fields.FIELDS,
                            v -> new HashMap<String, DocumentField>());
                        DocumentField field = new DocumentField(metadatafield, list);
                        fieldMap.put(field.getName(), field);
                    }, (p, c) -> parseFieldsValue(p),
                        new ParseField(metadatafield));
                } else {
                    parser.declareField((map, field) -> {
                        @SuppressWarnings("unchecked")
                        Map<String, DocumentField> fieldMap = (Map<String, DocumentField>) map.computeIfAbsent(Fields.FIELDS,
                            v -> new HashMap<String, DocumentField>());
                        fieldMap.put(field.getName(), field);
                    }, (p, c) -> new DocumentField(metadatafield, Collections.singletonList(parseFieldsValue(p))),
                        new ParseField(metadatafield), ValueType.VALUE);
                }
            }
        }
    }

@ -834,13 +859,15 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
                && Arrays.equals(matchedQueries, other.matchedQueries)
                && Objects.equals(explanation, other.explanation)
                && Objects.equals(shard, other.shard)
                && Objects.equals(innerHits, other.innerHits);
                && Objects.equals(innerHits, other.innerHits)
                && Objects.equals(index, other.index)
                && Objects.equals(clusterAlias, other.clusterAlias);
    }

    @Override
    public int hashCode() {
        return Objects.hash(id, type, nestedIdentity, version, source, fields, getHighlightFields(), Arrays.hashCode(matchedQueries),
            explanation, shard, innerHits);
            explanation, shard, innerHits, index, clusterAlias);
    }

    /**

@ -958,4 +985,9 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
            return Objects.hash(field, offset, child);
        }
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }
}

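The index parsing added above splits a remote hit's index of the form cluster_alias:index on the first separator. A standalone sketch of exactly that split (plain Java; the literal ':' stands in for RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR, and the class name is made up):

import java.util.Arrays;

final class RemoteIndexSplitSketch {
    /** Returns {clusterAlias, index}; clusterAlias is null when there is no separator. */
    static String[] split(String index) {
        int i = index.indexOf(':');
        if (i > 0) {
            return new String[] { index.substring(0, i), index.substring(i + 1) };
        }
        return new String[] { null, index };
    }

    public static void main(String[] args) {
        System.out.println(Arrays.toString(split("remote1:logs-2018"))); // [remote1, logs-2018]
        System.out.println(Arrays.toString(split("logs-2018")));         // [null, logs-2018]
    }
}
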
@ -19,6 +19,8 @@

package org.elasticsearch.search;

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.common.Nullable;

@ -30,8 +32,6 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.transport.RemoteClusterAware;

import java.io.IOException;

/**
 * The target that the search request was executed on.
 */

@ -39,7 +39,7 @@ public final class SearchShardTarget implements Writeable, Comparable<SearchShar

    private final Text nodeId;
    private final ShardId shardId;
    //original indices and cluster alias are only needed in the coordinating node throughout the search request execution.
    //original indices are only needed in the coordinating node throughout the search request execution.
    //no need to serialize them as part of SearchShardTarget.
    private final transient OriginalIndices originalIndices;
    private final String clusterAlias;

@ -42,6 +42,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
|
|||
import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper;
|
||||
import org.elasticsearch.search.aggregations.support.ValuesSourceType;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
@ -53,7 +54,7 @@ public class AutoDateHistogramAggregationBuilder
|
|||
|
||||
     public static final String NAME = "auto_date_histogram";

-    public static final ParseField NUM_BUCKETS_FIELD = new ParseField("buckets");
+    private static final ParseField NUM_BUCKETS_FIELD = new ParseField("buckets");

     private static final ObjectParser<AutoDateHistogramAggregationBuilder, Void> PARSER;

     static {

@@ -63,6 +64,29 @@ public class AutoDateHistogramAggregationBuilder
         PARSER.declareInt(AutoDateHistogramAggregationBuilder::setNumBuckets, NUM_BUCKETS_FIELD);
     }

+    /**
+     *
+     * Build roundings, computed dynamically as roundings are time zone dependent.
+     * The current implementation probably should not be invoked in a tight loop.
+     * @return Array of RoundingInfo
+     */
+    static RoundingInfo[] buildRoundings(DateTimeZone timeZone) {
+        RoundingInfo[] roundings = new RoundingInfo[6];
+        roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone),
+            1000L, 1, 5, 10, 30);
+        roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone),
+            60 * 1000L, 1, 5, 10, 30);
+        roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone),
+            60 * 60 * 1000L, 1, 3, 12);
+        roundings[3] = new RoundingInfo(createRounding(DateTimeUnit.DAY_OF_MONTH, timeZone),
+            24 * 60 * 60 * 1000L, 1, 7);
+        roundings[4] = new RoundingInfo(createRounding(DateTimeUnit.MONTH_OF_YEAR, timeZone),
+            30 * 24 * 60 * 60 * 1000L, 1, 3);
+        roundings[5] = new RoundingInfo(createRounding(DateTimeUnit.YEAR_OF_CENTURY, timeZone),
+            365 * 24 * 60 * 60 * 1000L, 1, 5, 10, 20, 50, 100);
+        return roundings;
+    }
+
     public static AutoDateHistogramAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException {
         return PARSER.parse(parser, new AutoDateHistogramAggregationBuilder(aggregationName), null);
     }

@@ -116,14 +140,7 @@ public class AutoDateHistogramAggregationBuilder
     @Override
     protected ValuesSourceAggregatorFactory<Numeric, ?> innerBuild(SearchContext context, ValuesSourceConfig<Numeric> config,
                                                                    AggregatorFactory<?> parent, Builder subFactoriesBuilder) throws IOException {
-        RoundingInfo[] roundings = new RoundingInfo[6];
-        roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE), 1000L, 1, 5, 10, 30);
-        roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR), 60 * 1000L, 1, 5, 10, 30);
-        roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY), 60 * 60 * 1000L, 1, 3, 12);
-        roundings[3] = new RoundingInfo(createRounding(DateTimeUnit.DAY_OF_MONTH), 24 * 60 * 60 * 1000L, 1, 7);
-        roundings[4] = new RoundingInfo(createRounding(DateTimeUnit.MONTH_OF_YEAR), 30 * 24 * 60 * 60 * 1000L, 1, 3);
-        roundings[5] = new RoundingInfo(createRounding(DateTimeUnit.YEAR_OF_CENTURY), 365 * 24 * 60 * 60 * 1000L, 1, 5, 10, 20, 50, 100);
-
+        RoundingInfo[] roundings = buildRoundings(timeZone());
         int maxRoundingInterval = Arrays.stream(roundings, 0, roundings.length - 1)
             .map(rounding -> rounding.innerIntervals)
             .flatMapToInt(Arrays::stream)

@@ -139,10 +156,10 @@ public class AutoDateHistogramAggregationBuilder
         return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData);
     }

-    private Rounding createRounding(DateTimeUnit interval) {
+    private static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) {
         Rounding.Builder tzRoundingBuilder = Rounding.builder(interval);
-        if (timeZone() != null) {
-            tzRoundingBuilder.timeZone(timeZone());
+        if (timeZone != null) {
+            tzRoundingBuilder.timeZone(timeZone);
         }
         Rounding rounding = tzRoundingBuilder.build();
         return rounding;

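The new `buildRoundings(DateTimeZone)` above exists because calendar roundings cannot be computed once and cached: the same instant falls into a different day (or month, or year) bucket depending on the zone. A self-contained sketch of that effect using plain `java.time` (an illustration only, not the `Rounding` API itself):

    import java.time.Instant;
    import java.time.ZoneId;
    import java.time.ZoneOffset;

    public class RoundingDemo {
        public static void main(String[] args) {
            Instant ts = Instant.parse("2018-07-31T22:30:00Z");
            for (ZoneId zone : new ZoneId[] { ZoneOffset.UTC, ZoneId.of("Asia/Kolkata") }) {
                // Round down to the start of the day in the given zone.
                Instant dayStart = ts.atZone(zone).toLocalDate().atStartOfDay(zone).toInstant();
                System.out.println(zone + " -> " + dayStart);
            }
            // UTC buckets the timestamp at 2018-07-31T00:00:00Z, while Asia/Kolkata
            // (UTC+05:30) buckets it at 2018-07-31T18:30:00Z -- a different key.
        }
    }
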
@@ -447,7 +447,8 @@ public final class InternalAutoDateHistogram extends
         return new BucketReduceResult(list, roundingInfo, roundingIdx);
     }

-    private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, RoundingInfo[] roundings) {
+    private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx,
+                                       RoundingInfo[] roundings) {
         if (roundingIdx == roundings.length - 1) {
             return roundingIdx;
         }

@@ -509,7 +510,8 @@ public final class InternalAutoDateHistogram extends
             pipelineAggregators(), getMetaData());
     }

-    private BucketReduceResult maybeMergeConsecutiveBuckets(BucketReduceResult reducedBucketsResult, ReduceContext reduceContext) {
+    private BucketReduceResult maybeMergeConsecutiveBuckets(BucketReduceResult reducedBucketsResult,
+                                                            ReduceContext reduceContext) {
         List<Bucket> buckets = reducedBucketsResult.buckets;
         RoundingInfo roundingInfo = reducedBucketsResult.roundingInfo;
         int roundingIdx = reducedBucketsResult.roundingIdx;

@@ -539,7 +541,7 @@ public final class InternalAutoDateHistogram extends
                 key = roundingInfo.rounding.round(bucket.key);
             }
             reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1);
-            sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations));
+            sameKeyedBuckets.add(new Bucket(Math.round(key), bucket.docCount, format, bucket.aggregations));
         }
         if (sameKeyedBuckets.isEmpty() == false) {
             reduceContext.consumeBucketsAndMaybeBreak(1);

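The hunk above rounds each bucket key with the coarser rounding and folds buckets that collapse onto the same key. A rough standalone analogue of that merge step, with a fixed interval standing in for `RoundingInfo` (names here are illustrative):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.SortedMap;
    import java.util.TreeMap;

    public class MergeBucketsSketch {
        // Fold buckets whose keys round down to the same coarser key, summing doc counts.
        static Map<Long, Long> merge(SortedMap<Long, Long> buckets, long interval) {
            Map<Long, Long> merged = new LinkedHashMap<>();
            for (Map.Entry<Long, Long> e : buckets.entrySet()) {
                long key = Math.floorDiv(e.getKey(), interval) * interval; // coarser rounding
                merged.merge(key, e.getValue(), Long::sum);
            }
            return merged;
        }

        public static void main(String[] args) {
            SortedMap<Long, Long> buckets = new TreeMap<>();
            buckets.put(0L, 3L);
            buckets.put(1000L, 2L);
            buckets.put(5000L, 7L);
            System.out.println(merge(buckets, 5000L)); // {0=5, 5000=7}
        }
    }
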
@@ -32,7 +32,6 @@ import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.plugins.ReloadablePlugin;
 import org.elasticsearch.test.ESIntegTestCase;

-import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.Files;
 import java.nio.file.StandardCopyOption;

@@ -205,14 +204,7 @@ public class ReloadSecureSettingsIT extends ESIntegTestCase {
                     assertThat(nodesMap.size(), equalTo(cluster().size()));
                     for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) {
                         assertThat(nodeResponse.reloadException(), notNullValue());
-                        // Running in a JVM with a BouncyCastle FIPS Security Provider, decrypting the Keystore with the wrong
-                        // password returns a SecurityException if the DataInputStream can't be fully consumed
-                        if (inFipsJvm()) {
-                            assertThat(nodeResponse.reloadException(), instanceOf(SecurityException.class));
-                        } else {
-                            assertThat(nodeResponse.reloadException(), instanceOf(IOException.class));
-                        }
                     }
                 } catch (final AssertionError e) {
                     reloadSettingsError.set(e);

@@ -60,7 +60,12 @@ public class RolloverIT extends ESIntegTestCase {

     public void testRolloverOnEmptyIndex() throws Exception {
-        assertAcked(prepareCreate("test_index-1").addAlias(new Alias("test_alias")).get());
+        Alias testAlias = new Alias("test_alias");
+        boolean explicitWriteIndex = randomBoolean();
+        if (explicitWriteIndex) {
+            testAlias.writeIndex(true);
+        }
+        assertAcked(prepareCreate("test_index-1").addAlias(testAlias).get());
         final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").get();
         assertThat(response.getOldIndex(), equalTo("test_index-1"));
         assertThat(response.getNewIndex(), equalTo("test_index-000002"));

@@ -69,7 +74,12 @@ public class RolloverIT extends ESIntegTestCase {
         assertThat(response.getConditionStatus().size(), equalTo(0));
         final ClusterState state = client().admin().cluster().prepareState().get().getState();
         final IndexMetaData oldIndex = state.metaData().index("test_index-1");
-        assertFalse(oldIndex.getAliases().containsKey("test_alias"));
+        if (explicitWriteIndex) {
+            assertTrue(oldIndex.getAliases().containsKey("test_alias"));
+            assertFalse(oldIndex.getAliases().get("test_alias").writeIndex());
+        } else {
+            assertFalse(oldIndex.getAliases().containsKey("test_alias"));
+        }
         final IndexMetaData newIndex = state.metaData().index("test_index-000002");
         assertTrue(newIndex.getAliases().containsKey("test_alias"));
     }

@@ -97,8 +107,49 @@ public class RolloverIT extends ESIntegTestCase {
             is(both(greaterThanOrEqualTo(beforeTime)).and(lessThanOrEqualTo(client().threadPool().absoluteTimeInMillis() + 1000L))));
     }

+    public void testRolloverWithExplicitWriteIndex() throws Exception {
+        long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L;
+        assertAcked(prepareCreate("test_index-2").addAlias(new Alias("test_alias").writeIndex(true)).get());
+        index("test_index-2", "type1", "1", "field", "value");
+        flush("test_index-2");
+        final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").get();
+        assertThat(response.getOldIndex(), equalTo("test_index-2"));
+        assertThat(response.getNewIndex(), equalTo("test_index-000003"));
+        assertThat(response.isDryRun(), equalTo(false));
+        assertThat(response.isRolledOver(), equalTo(true));
+        assertThat(response.getConditionStatus().size(), equalTo(0));
+        final ClusterState state = client().admin().cluster().prepareState().get().getState();
+        final IndexMetaData oldIndex = state.metaData().index("test_index-2");
+        assertTrue(oldIndex.getAliases().containsKey("test_alias"));
+        assertFalse(oldIndex.getAliases().get("test_alias").writeIndex());
+        final IndexMetaData newIndex = state.metaData().index("test_index-000003");
+        assertTrue(newIndex.getAliases().containsKey("test_alias"));
+        assertTrue(newIndex.getAliases().get("test_alias").writeIndex());
+        assertThat(oldIndex.getRolloverInfos().size(), equalTo(1));
+        assertThat(oldIndex.getRolloverInfos().get("test_alias").getAlias(), equalTo("test_alias"));
+        assertThat(oldIndex.getRolloverInfos().get("test_alias").getMetConditions(), is(empty()));
+        assertThat(oldIndex.getRolloverInfos().get("test_alias").getTime(),
+            is(both(greaterThanOrEqualTo(beforeTime)).and(lessThanOrEqualTo(client().threadPool().absoluteTimeInMillis() + 1000L))));
+    }
+
+    public void testRolloverWithNoWriteIndex() {
+        Boolean firstIsWriteIndex = randomFrom(false, null);
+        assertAcked(prepareCreate("index1").addAlias(new Alias("alias").writeIndex(firstIsWriteIndex)).get());
+        if (firstIsWriteIndex == null) {
+            assertAcked(prepareCreate("index2").addAlias(new Alias("alias").writeIndex(randomFrom(false, null))).get());
+        }
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class,
+            () -> client().admin().indices().prepareRolloverIndex("alias").dryRun(randomBoolean()).get());
+        assertThat(exception.getMessage(), equalTo("source alias [alias] does not point to a write index"));
+    }
+
     public void testRolloverWithIndexSettings() throws Exception {
-        assertAcked(prepareCreate("test_index-2").addAlias(new Alias("test_alias")).get());
+        Alias testAlias = new Alias("test_alias");
+        boolean explicitWriteIndex = randomBoolean();
+        if (explicitWriteIndex) {
+            testAlias.writeIndex(true);
+        }
+        assertAcked(prepareCreate("test_index-2").addAlias(testAlias).get());
         index("test_index-2", "type1", "1", "field", "value");
         flush("test_index-2");
         final Settings settings = Settings.builder()

@@ -114,12 +165,17 @@ public class RolloverIT extends ESIntegTestCase {
         assertThat(response.getConditionStatus().size(), equalTo(0));
         final ClusterState state = client().admin().cluster().prepareState().get().getState();
         final IndexMetaData oldIndex = state.metaData().index("test_index-2");
-        assertFalse(oldIndex.getAliases().containsKey("test_alias"));
         final IndexMetaData newIndex = state.metaData().index("test_index-000003");
         assertThat(newIndex.getNumberOfShards(), equalTo(1));
         assertThat(newIndex.getNumberOfReplicas(), equalTo(0));
         assertTrue(newIndex.getAliases().containsKey("test_alias"));
         assertTrue(newIndex.getAliases().containsKey("extra_alias"));
+        if (explicitWriteIndex) {
+            assertFalse(oldIndex.getAliases().get("test_alias").writeIndex());
+            assertTrue(newIndex.getAliases().get("test_alias").writeIndex());
+        } else {
+            assertFalse(oldIndex.getAliases().containsKey("test_alias"));
+        }
     }

     public void testRolloverDryRun() throws Exception {

@@ -140,7 +196,12 @@ public class RolloverIT extends ESIntegTestCase {
     }

     public void testRolloverConditionsNotMet() throws Exception {
-        assertAcked(prepareCreate("test_index-0").addAlias(new Alias("test_alias")).get());
+        boolean explicitWriteIndex = randomBoolean();
+        Alias testAlias = new Alias("test_alias");
+        if (explicitWriteIndex) {
+            testAlias.writeIndex(true);
+        }
+        assertAcked(prepareCreate("test_index-0").addAlias(testAlias).get());
         index("test_index-0", "type1", "1", "field", "value");
         flush("test_index-0");
         final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias")

@@ -160,12 +221,22 @@ public class RolloverIT extends ESIntegTestCase {
         final ClusterState state = client().admin().cluster().prepareState().get().getState();
         final IndexMetaData oldIndex = state.metaData().index("test_index-0");
         assertTrue(oldIndex.getAliases().containsKey("test_alias"));
+        if (explicitWriteIndex) {
+            assertTrue(oldIndex.getAliases().get("test_alias").writeIndex());
+        } else {
+            assertNull(oldIndex.getAliases().get("test_alias").writeIndex());
+        }
         final IndexMetaData newIndex = state.metaData().index("test_index-000001");
         assertNull(newIndex);
     }

     public void testRolloverWithNewIndexName() throws Exception {
-        assertAcked(prepareCreate("test_index").addAlias(new Alias("test_alias")).get());
+        Alias testAlias = new Alias("test_alias");
+        boolean explicitWriteIndex = randomBoolean();
+        if (explicitWriteIndex) {
+            testAlias.writeIndex(true);
+        }
+        assertAcked(prepareCreate("test_index").addAlias(testAlias).get());
         index("test_index", "type1", "1", "field", "value");
         flush("test_index");
         final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias")

@@ -177,9 +248,14 @@ public class RolloverIT extends ESIntegTestCase {
         assertThat(response.getConditionStatus().size(), equalTo(0));
         final ClusterState state = client().admin().cluster().prepareState().get().getState();
         final IndexMetaData oldIndex = state.metaData().index("test_index");
-        assertFalse(oldIndex.getAliases().containsKey("test_alias"));
         final IndexMetaData newIndex = state.metaData().index("test_new_index");
         assertTrue(newIndex.getAliases().containsKey("test_alias"));
+        if (explicitWriteIndex) {
+            assertFalse(oldIndex.getAliases().get("test_alias").writeIndex());
+            assertTrue(newIndex.getAliases().get("test_alias").writeIndex());
+        } else {
+            assertFalse(oldIndex.getAliases().containsKey("test_alias"));
+        }
     }

     public void testRolloverOnExistingIndex() throws Exception {

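Taken together, these tests pin down the write-index semantics of rollover: when the alias explicitly marks a write index, the alias stays on the old index (demoted to `writeIndex == false`) and the new index becomes the write index; otherwise the alias simply moves. A minimal sketch in the same ESIntegTestCase style as the tests above (index and alias names are illustrative, not from the original tests):

    Alias writeAlias = new Alias("logs_alias").writeIndex(true);
    assertAcked(prepareCreate("logs-000001").addAlias(writeAlias).get());
    RolloverResponse rollover = client().admin().indices().prepareRolloverIndex("logs_alias").get();
    // Rollover derives "logs-000002" from the source name; the alias now spans
    // both indices, and only the new index has writeIndex == true.
    assertThat(rollover.getNewIndex(), equalTo("logs-000002"));
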
@@ -172,39 +172,75 @@ public class TransportRolloverActionTests extends ESTestCase {
         assertTrue(foundRemove);
     }

+    public void testCreateUpdateAliasRequestWithExplicitWriteIndex() {
+        String sourceAlias = randomAlphaOfLength(10);
+        String sourceIndex = randomAlphaOfLength(10);
+        String targetIndex = randomAlphaOfLength(10);
+        final RolloverRequest rolloverRequest = new RolloverRequest(sourceAlias, targetIndex);
+        final IndicesAliasesClusterStateUpdateRequest updateRequest =
+            TransportRolloverAction.prepareRolloverAliasesWriteIndexUpdateRequest(sourceIndex, targetIndex, rolloverRequest);
+
+        List<AliasAction> actions = updateRequest.actions();
+        assertThat(actions, hasSize(2));
+        boolean foundAddWrite = false;
+        boolean foundRemoveWrite = false;
+        for (AliasAction action : actions) {
+            AliasAction.Add addAction = (AliasAction.Add) action;
+            if (action.getIndex().equals(targetIndex)) {
+                assertEquals(sourceAlias, addAction.getAlias());
+                assertTrue(addAction.writeIndex());
+                foundAddWrite = true;
+            } else if (action.getIndex().equals(sourceIndex)) {
+                assertEquals(sourceAlias, addAction.getAlias());
+                assertFalse(addAction.writeIndex());
+                foundRemoveWrite = true;
+            } else {
+                throw new AssertionError("Unknown index [" + action.getIndex() + "]");
+            }
+        }
+        assertTrue(foundAddWrite);
+        assertTrue(foundRemoveWrite);
+    }
+
     public void testValidation() {
         String index1 = randomAlphaOfLength(10);
-        String alias = randomAlphaOfLength(10);
+        String aliasWithWriteIndex = randomAlphaOfLength(10);
         String index2 = randomAlphaOfLength(10);
         String aliasWithMultipleIndices = randomAlphaOfLength(10);
+        String aliasWithNoWriteIndex = randomAlphaOfLength(10);
+        Boolean firstIsWriteIndex = randomFrom(false, null);
         final Settings settings = Settings.builder()
             .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
             .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
             .build();
-        final MetaData metaData = MetaData.builder()
+        MetaData.Builder metaDataBuilder = MetaData.builder()
             .put(IndexMetaData.builder(index1)
                 .settings(settings)
-                .putAlias(AliasMetaData.builder(alias))
                 .putAlias(AliasMetaData.builder(aliasWithMultipleIndices))
-            )
-            .put(IndexMetaData.builder(index2)
-                .settings(settings)
-                .putAlias(AliasMetaData.builder(aliasWithMultipleIndices))
-            ).build();
+                .putAlias(AliasMetaData.builder(aliasWithWriteIndex))
+                .putAlias(AliasMetaData.builder(aliasWithNoWriteIndex).writeIndex(firstIsWriteIndex))
+            );
+        IndexMetaData.Builder indexTwoBuilder = IndexMetaData.builder(index2).settings(settings);
+        if (firstIsWriteIndex == null) {
+            indexTwoBuilder.putAlias(AliasMetaData.builder(aliasWithNoWriteIndex).writeIndex(randomFrom(false, null)));
+        }
+        metaDataBuilder.put(indexTwoBuilder);
+        MetaData metaData = metaDataBuilder.build();

-        expectThrows(IllegalArgumentException.class, () ->
-            TransportRolloverAction.validate(metaData, new RolloverRequest(aliasWithMultipleIndices,
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () ->
+            TransportRolloverAction.validate(metaData, new RolloverRequest(aliasWithNoWriteIndex,
                 randomAlphaOfLength(10))));
-        expectThrows(IllegalArgumentException.class, () ->
+        assertThat(exception.getMessage(), equalTo("source alias [" + aliasWithNoWriteIndex + "] does not point to a write index"));
+        exception = expectThrows(IllegalArgumentException.class, () ->
             TransportRolloverAction.validate(metaData, new RolloverRequest(randomFrom(index1, index2),
                 randomAlphaOfLength(10))));
-        expectThrows(IllegalArgumentException.class, () ->
+        assertThat(exception.getMessage(), equalTo("source alias is a concrete index"));
+        exception = expectThrows(IllegalArgumentException.class, () ->
             TransportRolloverAction.validate(metaData, new RolloverRequest(randomAlphaOfLength(5),
                 randomAlphaOfLength(10)))
         );
-        TransportRolloverAction.validate(metaData, new RolloverRequest(alias, randomAlphaOfLength(10)));
+        assertThat(exception.getMessage(), equalTo("source alias does not exist"));
+        TransportRolloverAction.validate(metaData, new RolloverRequest(aliasWithWriteIndex, randomAlphaOfLength(10)));
     }

     public void testGenerateRolloverIndexName() {

@@ -248,7 +284,7 @@ public class TransportRolloverActionTests extends ESTestCase {
     public void testRejectDuplicateAlias() {
         final IndexTemplateMetaData template = IndexTemplateMetaData.builder("test-template")
             .patterns(Arrays.asList("foo-*", "bar-*"))
-            .putAlias(AliasMetaData.builder("foo-write")).putAlias(AliasMetaData.builder("bar-write"))
+            .putAlias(AliasMetaData.builder("foo-write")).putAlias(AliasMetaData.builder("bar-write").writeIndex(randomBoolean()))
             .build();
         final MetaData metaData = MetaData.builder().put(createMetaData(), false).put(template).build();
         String indexName = randomFrom("foo-123", "bar-xyz");

@@ -27,10 +27,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.AllocationId;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTable.Builder;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.TestShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;

@@ -44,6 +46,7 @@ import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;

 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;

@@ -93,7 +96,8 @@ public class ClusterStateCreationUtils {
         IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
             .put(SETTING_VERSION_CREATED, Version.CURRENT)
             .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
-            .put(SETTING_CREATION_DATE, System.currentTimeMillis())).primaryTerm(0, primaryTerm).build();
+            .put(SETTING_CREATION_DATE, System.currentTimeMillis())).primaryTerm(0, primaryTerm)
+            .build();

         RoutingTable.Builder routing = new RoutingTable.Builder();
         routing.addAsNew(indexMetaData);

@@ -138,12 +142,19 @@ public class ClusterStateCreationUtils {
                 TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, false, replicaState,
                     unassignedInfo));
         }
+        final IndexShardRoutingTable indexShardRoutingTable = indexShardRoutingBuilder.build();
+
+        IndexMetaData.Builder indexMetaDataBuilder = new IndexMetaData.Builder(indexMetaData);
+        indexMetaDataBuilder.putInSyncAllocationIds(0,
+            indexShardRoutingTable.activeShards().stream().map(ShardRouting::allocationId).map(AllocationId::getId)
+                .collect(Collectors.toSet())
+        );

         ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
         state.nodes(discoBuilder);
-        state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
+        state.metaData(MetaData.builder().put(indexMetaDataBuilder.build(), false).generateClusterUuidIfNeeded());
         state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(indexMetaData.getIndex())
-            .addIndexShard(indexShardRoutingBuilder.build())).build());
+            .addIndexShard(indexShardRoutingTable)).build());
         return state.build();
     }

@@ -272,21 +283,21 @@ public class ClusterStateCreationUtils {
         state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
         return state.build();
     }

     /**
      * Creates cluster state with several indexes, shards and replicas and all shards STARTED.
      */
     public static ClusterState stateWithAssignedPrimariesAndReplicas(String[] indices, int numberOfShards, int numberOfReplicas) {

-       int numberOfDataNodes = numberOfReplicas + 1;
+        int numberOfDataNodes = numberOfReplicas + 1;
         DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
         for (int i = 0; i < numberOfDataNodes + 1; i++) {
             final DiscoveryNode node = newNode(i);
             discoBuilder = discoBuilder.add(node);
         }
         discoBuilder.localNodeId(newNode(0).getId());
-       discoBuilder.masterNodeId(newNode(numberOfDataNodes + 1).getId());
+        discoBuilder.masterNodeId(newNode(numberOfDataNodes + 1).getId());
         ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
         state.nodes(discoBuilder);
         Builder routingTableBuilder = RoutingTable.builder();

@@ -316,7 +327,7 @@ public class ClusterStateCreationUtils {
         state.metaData(metadataBuilder);
         state.routingTable(routingTableBuilder.build());
         return state.build();
-   }
+    }

     /**
      * Creates cluster state with an index that has one shard and as many replicas as numberOfReplicas.

@@ -131,7 +131,7 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e =
             expectThrows(NodeValidationException.class,
-                () -> BootstrapChecks.check(defaultContext, true, checks, "testExceptionAggregation"));
+                () -> BootstrapChecks.check(defaultContext, true, checks));
         assertThat(e, hasToString(allOf(containsString("bootstrap checks failed"), containsString("first"), containsString("second"))));
         final Throwable[] suppressed = e.getSuppressed();
         assertThat(suppressed.length, equalTo(2));

@@ -162,7 +162,7 @@ public class BootstrapChecksTests extends ESTestCase {
         final NodeValidationException e =
             expectThrows(
                 NodeValidationException.class,
-                () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testHeapSizeCheck"));
+                () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
         assertThat(
             e.getMessage(),
             containsString("initial heap size [" + initialHeapSize.get() + "] " +

@@ -170,7 +170,7 @@ public class BootstrapChecksTests extends ESTestCase {

         initialHeapSize.set(maxHeapSize.get());

-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testHeapSizeCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));

         // nothing should happen if the initial heap size or the max
         // heap size is not available

@@ -179,7 +179,7 @@ public class BootstrapChecksTests extends ESTestCase {
         } else {
             maxHeapSize.set(0);
         }
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testHeapSizeCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
     }

     public void testFileDescriptorLimits() throws NodeValidationException {

@@ -205,17 +205,17 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e =
             expectThrows(NodeValidationException.class,
-                () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testFileDescriptorLimits"));
+                () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
         assertThat(e.getMessage(), containsString("max file descriptors"));

         maxFileDescriptorCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testFileDescriptorLimits");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));

         // nothing should happen if current file descriptor count is
         // not available
         maxFileDescriptorCount.set(-1);
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testFileDescriptorLimits");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
     }

     public void testFileDescriptorLimitsThrowsOnInvalidLimit() {

@@ -262,15 +262,13 @@ public class BootstrapChecksTests extends ESTestCase {
                 () -> BootstrapChecks.check(
                     bootstrapContext,
                     true,
-                    Collections.singletonList(check),
-                    "testFileDescriptorLimitsThrowsOnInvalidLimit"));
+                    Collections.singletonList(check)));
             assertThat(
                 e.getMessage(),
                 containsString("memory locking requested for elasticsearch process but memory is not locked"));
         } else {
             // nothing should happen
-            BootstrapChecks.check(bootstrapContext, true, Collections.singletonList(check),
-                "testFileDescriptorLimitsThrowsOnInvalidLimit");
+            BootstrapChecks.check(bootstrapContext, true, Collections.singletonList(check));
         }
     }
 }

@@ -287,17 +285,17 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"));
+            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
         assertThat(e.getMessage(), containsString("max number of threads"));

         maxNumberOfThreads.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));

         // nothing should happen if current max number of threads is
         // not available
         maxNumberOfThreads.set(-1);
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
     }

     public void testMaxSizeVirtualMemory() throws NodeValidationException {

@@ -317,16 +315,16 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxSizeVirtualMemory"));
+            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
         assertThat(e.getMessage(), containsString("max size virtual memory"));

         maxSizeVirtualMemory.set(rlimInfinity);

-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxSizeVirtualMemory");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));

         // nothing should happen if max size virtual memory is not available
         maxSizeVirtualMemory.set(Long.MIN_VALUE);
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxSizeVirtualMemory");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
     }

     public void testMaxFileSizeCheck() throws NodeValidationException {

@@ -346,16 +344,16 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxFileSize"));
+            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
         assertThat(e.getMessage(), containsString("max file size"));

         maxFileSize.set(rlimInfinity);

-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxFileSize");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));

         // nothing should happen if max file size is not available
         maxFileSize.set(Long.MIN_VALUE);
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxFileSize");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
     }

     public void testMaxMapCountCheck() throws NodeValidationException {

@@ -370,17 +368,17 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxMapCountCheck"));
+            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
         assertThat(e.getMessage(), containsString("max virtual memory areas vm.max_map_count"));

         maxMapCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxMapCountCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));

         // nothing should happen if current vm.max_map_count is not
         // available
         maxMapCount.set(-1);
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testMaxMapCountCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
     }

     public void testClientJvmCheck() throws NodeValidationException {

@@ -394,14 +392,14 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testClientJvmCheck"));
+            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
         assertThat(
             e.getMessage(),
             containsString("JVM is using the client VM [Java HotSpot(TM) 32-Bit Client VM] " +
                 "but should be using a server VM for the best performance"));

         vmName.set("Java HotSpot(TM) 32-Bit Server VM");
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testClientJvmCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
     }

     public void testUseSerialGCCheck() throws NodeValidationException {

@@ -415,14 +413,14 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testUseSerialGCCheck"));
+            () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(check)));
         assertThat(
             e.getMessage(),
             containsString("JVM is using the serial collector but should not be for the best performance; " + "" +
                 "either it's the default for the VM [" + JvmInfo.jvmInfo().getVmName() + "] or -XX:+UseSerialGC was explicitly specified"));

         useSerialGC.set("false");
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), "testUseSerialGCCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));
     }

     public void testSystemCallFilterCheck() throws NodeValidationException {

@@ -439,15 +437,14 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(context, true, Collections.singletonList(systemCallFilterEnabledCheck),
-                "testSystemCallFilterCheck"));
+            () -> BootstrapChecks.check(context, true, Collections.singletonList(systemCallFilterEnabledCheck)));
         assertThat(
             e.getMessage(),
             containsString("system call filters failed to install; " +
                 "check the logs and fix your configuration or disable system call filters at your own risk"));

         isSystemCallFilterInstalled.set(true);
-        BootstrapChecks.check(context, true, Collections.singletonList(systemCallFilterEnabledCheck), "testSystemCallFilterCheck");
+        BootstrapChecks.check(context, true, Collections.singletonList(systemCallFilterEnabledCheck));
         BootstrapContext context_1 = new BootstrapContext(Settings.builder().put("bootstrap.system_call_filter", false).build(), null);
         final BootstrapChecks.SystemCallFilterCheck systemCallFilterNotEnabledCheck = new BootstrapChecks.SystemCallFilterCheck() {
             @Override

@@ -456,9 +453,9 @@ public class BootstrapChecksTests extends ESTestCase {
             }
         };
         isSystemCallFilterInstalled.set(false);
-        BootstrapChecks.check(context_1, true, Collections.singletonList(systemCallFilterNotEnabledCheck), "testSystemCallFilterCheck");
+        BootstrapChecks.check(context_1, true, Collections.singletonList(systemCallFilterNotEnabledCheck));
         isSystemCallFilterInstalled.set(true);
-        BootstrapChecks.check(context_1, true, Collections.singletonList(systemCallFilterNotEnabledCheck), "testSystemCallFilterCheck");
+        BootstrapChecks.check(context_1, true, Collections.singletonList(systemCallFilterNotEnabledCheck));
     }

     public void testMightForkCheck() throws NodeValidationException {

@@ -553,8 +550,6 @@ public class BootstrapChecksTests extends ESTestCase {
             final Runnable enableMightFork,
             final Consumer<NodeValidationException> consumer) throws NodeValidationException {

-        final String methodName = Thread.currentThread().getStackTrace()[2].getMethodName();
-
         // if system call filter is disabled, nothing should happen
         isSystemCallFilterInstalled.set(false);
         if (randomBoolean()) {

@@ -562,13 +557,13 @@ public class BootstrapChecksTests extends ESTestCase {
         } else {
             enableMightFork.run();
         }
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), methodName);
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));

         // if system call filter is enabled, but we will not fork, nothing should
         // happen
         isSystemCallFilterInstalled.set(true);
         disableMightFork.run();
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check), methodName);
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(check));

         // if system call filter is enabled, and we might fork, the check should be enforced, regardless of bootstrap checks being enabled
         // or not

@@ -577,7 +572,7 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, randomBoolean(), Collections.singletonList(check), methodName));
+            () -> BootstrapChecks.check(defaultContext, randomBoolean(), Collections.singletonList(check)));
         consumer.accept(e);
     }

@@ -602,7 +597,7 @@ public class BootstrapChecksTests extends ESTestCase {
         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
             () -> {
-                BootstrapChecks.check(defaultContext, true, checks, "testEarlyAccessCheck");
+                BootstrapChecks.check(defaultContext, true, checks);
             });
         assertThat(
             e.getMessage(),

@@ -613,7 +608,7 @@ public class BootstrapChecksTests extends ESTestCase {

         // if not on an early-access build, nothing should happen
         javaVersion.set(randomFrom("1.8.0_152", "9"));
-        BootstrapChecks.check(defaultContext, true, checks, "testEarlyAccessCheck");
+        BootstrapChecks.check(defaultContext, true, checks);

     }

@@ -649,7 +644,7 @@ public class BootstrapChecksTests extends ESTestCase {
         final NodeValidationException e =
             expectThrows(
                 NodeValidationException.class,
-                () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(g1GCCheck), "testG1GCCheck"));
+                () -> BootstrapChecks.check(defaultContext, true, Collections.singletonList(g1GCCheck)));
         assertThat(
             e.getMessage(),
             containsString(

@@ -657,12 +652,12 @@ public class BootstrapChecksTests extends ESTestCase {

         // if G1GC is disabled, nothing should happen
         isG1GCEnabled.set(false);
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(g1GCCheck), "testG1GCCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(g1GCCheck));

         // if on or after update 40, nothing should happen independent of whether or not G1GC is enabled
         isG1GCEnabled.set(randomBoolean());
         jvmVersion.set(String.format(Locale.ROOT, "25.%d-b%d", randomIntBetween(40, 112), randomIntBetween(1, 128)));
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(g1GCCheck), "testG1GCCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(g1GCCheck));

         final BootstrapChecks.G1GCCheck nonOracleCheck = new BootstrapChecks.G1GCCheck() {

@@ -674,7 +669,7 @@ public class BootstrapChecksTests extends ESTestCase {
         };

         // if not on an Oracle JVM, nothing should happen
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(nonOracleCheck), "testG1GCCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(nonOracleCheck));

         final BootstrapChecks.G1GCCheck nonJava8Check = new BootstrapChecks.G1GCCheck() {

@@ -686,7 +681,7 @@ public class BootstrapChecksTests extends ESTestCase {
         };

         // if not Java 8, nothing should happen
-        BootstrapChecks.check(defaultContext, true, Collections.singletonList(nonJava8Check), "testG1GCCheck");
+        BootstrapChecks.check(defaultContext, true, Collections.singletonList(nonJava8Check));
     }

     public void testAllPermissionCheck() throws NodeValidationException {

@@ -701,12 +696,12 @@ public class BootstrapChecksTests extends ESTestCase {
         final List<BootstrapCheck> checks = Collections.singletonList(allPermissionCheck);
         final NodeValidationException e = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, true, checks, "testIsAllPermissionCheck"));
+            () -> BootstrapChecks.check(defaultContext, true, checks));
         assertThat(e, hasToString(containsString("granting the all permission effectively disables security")));

         // if all permissions are not granted, nothing should happen
         isAllPermissionGranted.set(false);
-        BootstrapChecks.check(defaultContext, true, checks, "testIsAllPermissionCheck");
+        BootstrapChecks.check(defaultContext, true, checks);
     }

     public void testAlwaysEnforcedChecks() {

@@ -724,7 +719,7 @@ public class BootstrapChecksTests extends ESTestCase {

         final NodeValidationException alwaysEnforced = expectThrows(
             NodeValidationException.class,
-            () -> BootstrapChecks.check(defaultContext, randomBoolean(), Collections.singletonList(check), "testAlwaysEnforcedChecks"));
+            () -> BootstrapChecks.check(defaultContext, randomBoolean(), Collections.singletonList(check)));
         assertThat(alwaysEnforced, hasToString(containsString("error")));
     }

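Every hunk in this file drops the trailing context-string argument from `BootstrapChecks.check`; the behaviour under test is unchanged, most notably the aggregation of all failed checks into one `NodeValidationException` with each failure attached as a suppressed exception. A self-contained analogue of that aggregation, assuming a simplified `Check` interface rather than the real `BootstrapCheck`:

    import java.util.Arrays;
    import java.util.List;

    public class ChecksDemo {
        interface Check { String error(); } // null means the check passes

        // Run all checks and aggregate failures into one exception, attaching each
        // individual failure as a suppressed exception (what testExceptionAggregation asserts).
        static void check(List<Check> checks) {
            RuntimeException failure = null;
            for (Check c : checks) {
                String err = c.error();
                if (err != null) {
                    if (failure == null) {
                        failure = new RuntimeException("bootstrap checks failed");
                    }
                    failure.addSuppressed(new RuntimeException(err));
                }
            }
            if (failure != null) {
                throw failure;
            }
        }

        public static void main(String[] args) {
            try {
                check(Arrays.asList(() -> "first", () -> "second", () -> null));
            } catch (RuntimeException e) {
                // prints: bootstrap checks failed, suppressed=2
                System.out.println(e.getMessage() + ", suppressed=" + e.getSuppressed().length);
            }
        }
    }
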
@@ -65,7 +65,7 @@ public class ElasticsearchUncaughtExceptionHandlerTests extends ESTestCase {
         final AtomicInteger observedStatus = new AtomicInteger();
         final AtomicReference<String> threadNameReference = new AtomicReference<>();
         final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
-        thread.setUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler(() -> "testUncaughtError") {
+        thread.setUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler() {

             @Override
             void halt(int status) {

@@ -106,7 +106,7 @@ public class ElasticsearchUncaughtExceptionHandlerTests extends ESTestCase {
         thread.setName(name);
         final AtomicReference<String> threadNameReference = new AtomicReference<>();
         final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
-        thread.setUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler(() -> "testUncaughtException") {
+        thread.setUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler() {
             @Override
             void halt(int status) {
                 fail();

@@ -143,4 +143,22 @@ public class PolygonBuilderTests extends AbstractShapeBuilderTestCase<PolygonBuilder> {

         assertEquals("Hole lies outside shell at or near point (4.0, 3.0, NaN)", e.getMessage());
     }

+    public void testWidePolygonWithConfusingOrientation() {
+        // A valid polygon that is oriented correctly (anticlockwise) but which
+        // confounds a naive algorithm for determining its orientation, leading
+        // ES to believe that it crosses the dateline and "fixing" it in a way
+        // that self-intersects.
+
+        PolygonBuilder pb = new PolygonBuilder(new CoordinatesBuilder()
+            .coordinate(10, -20).coordinate(100, 0).coordinate(-100, 0).coordinate(20, -45).coordinate(40, -60).close());
+        pb.build(); // Should not throw an exception
+    }
+
+    public void testPolygonWithUndefinedOrientationDueToCollinearPoints() {
+        PolygonBuilder pb = new PolygonBuilder(new CoordinatesBuilder()
+            .coordinate(0.0, 0.0).coordinate(1.0, 1.0).coordinate(-1.0, -1.0).close());
+        InvalidShapeException e = expectThrows(InvalidShapeException.class, pb::build);
+        assertEquals("Cannot determine orientation: edges adjacent to (-1.0,-1.0) coincide", e.getMessage());
+    }
 }

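The "naive algorithm" the first test alludes to misjudges wide rings; the unambiguous way to decide a ring's orientation is its signed area (the shoelace formula), which also degenerates to zero for the second test's collinear points. A standalone illustration of the formula (not the PolygonBuilder internals):

    public class OrientationDemo {
        // Signed area via the shoelace formula: positive means counter-clockwise.
        static double signedArea(double[][] pts) {
            double sum = 0;
            for (int i = 0; i < pts.length; i++) {
                double[] a = pts[i];
                double[] b = pts[(i + 1) % pts.length];
                sum += a[0] * b[1] - b[0] * a[1];
            }
            return sum / 2;
        }

        public static void main(String[] args) {
            // The test polygon above: anticlockwise despite spanning 200 degrees of longitude.
            double[][] polygon = { {10, -20}, {100, 0}, {-100, 0}, {20, -45}, {40, -60} };
            System.out.println(signedArea(polygon) > 0 ? "CCW" : "CW"); // prints CCW
        }
    }
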
@@ -99,6 +99,15 @@ public class KeyStoreWrapperTests extends ESTestCase {
         assertTrue(keystore.getSettingNames().contains(KeyStoreWrapper.SEED_SETTING.getKey()));
     }

+    public void testDecryptKeyStoreWithWrongPassword() throws Exception {
+        KeyStoreWrapper keystore = KeyStoreWrapper.create();
+        keystore.save(env.configFile(), new char[0]);
+        final KeyStoreWrapper loadedkeystore = KeyStoreWrapper.load(env.configFile());
+        final SecurityException exception = expectThrows(SecurityException.class,
+            () -> loadedkeystore.decrypt(new char[]{'i', 'n', 'v', 'a', 'l', 'i', 'd'}));
+        assertThat(exception.getMessage(), containsString("Keystore has been corrupted or tampered with"));
+    }
+
     public void testCannotReadStringFromClosedKeystore() throws Exception {
         KeyStoreWrapper keystore = KeyStoreWrapper.create();
         assertThat(keystore.getSettingNames(), Matchers.hasItem(KeyStoreWrapper.SEED_SETTING.getKey()));

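The new test pins down the failure mode when the keystore is decrypted with the wrong password. Leaving the keystore's exact on-disk format aside, this is the behaviour an authenticated cipher gives you: with a key derived from the wrong password, the authentication tag fails to verify and decryption errors out instead of yielding garbage. A self-contained AES-GCM demonstration (illustrative only, not the KeyStoreWrapper implementation):

    import java.nio.charset.StandardCharsets;
    import javax.crypto.AEADBadTagException;
    import javax.crypto.Cipher;
    import javax.crypto.KeyGenerator;
    import javax.crypto.SecretKey;
    import javax.crypto.spec.GCMParameterSpec;

    public class GcmTamperDemo {
        public static void main(String[] args) throws Exception {
            KeyGenerator gen = KeyGenerator.getInstance("AES");
            gen.init(128);
            SecretKey rightKey = gen.generateKey();
            SecretKey wrongKey = gen.generateKey(); // stands in for a wrong password

            byte[] iv = new byte[12]; // zero IV: acceptable for a one-shot demo, never in production
            Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding");
            cipher.init(Cipher.ENCRYPT_MODE, rightKey, new GCMParameterSpec(128, iv));
            byte[] sealed = cipher.doFinal("secret settings".getBytes(StandardCharsets.UTF_8));

            cipher.init(Cipher.DECRYPT_MODE, wrongKey, new GCMParameterSpec(128, iv));
            try {
                cipher.doFinal(sealed); // the authentication tag check fails
            } catch (AEADBadTagException e) {
                System.out.println("wrong key detected: " + e);
            }
        }
    }
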
@@ -1978,7 +1978,7 @@ public class InternalEngineTests extends EngineTestCase {
     @Override
     public void append(LogEvent event) {
         final String formattedMessage = event.getMessage().getFormattedMessage();
-        if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][0] ")) {
+        if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][0]")) {
             if (event.getLoggerName().endsWith(".IW") &&
                 formattedMessage.contains("IW: now apply all deletes")) {
                 sawIndexWriterMessage = true;

@@ -26,7 +26,11 @@ import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.mapper.RoutingFieldMapper;
+import org.elasticsearch.index.mapper.IdFieldMapper;
+import org.elasticsearch.index.mapper.IgnoredFieldMapper;
+import org.elasticsearch.index.mapper.IndexFieldMapper;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.TypeFieldMapper;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.RandomObjects;

@@ -98,14 +102,28 @@ public class DocumentFieldTests extends ESTestCase {

     public static Tuple<DocumentField, DocumentField> randomDocumentField(XContentType xContentType) {
         if (randomBoolean()) {
-            String fieldName = randomFrom(RoutingFieldMapper.NAME);
-            DocumentField documentField = new DocumentField(fieldName, Collections.singletonList(randomAlphaOfLengthBetween(3, 10)));
+            String metaField = randomValueOtherThanMany(field -> field.equals(TypeFieldMapper.NAME)
+                    || field.equals(IndexFieldMapper.NAME) || field.equals(IdFieldMapper.NAME),
+                () -> randomFrom(MapperService.getAllMetaFields()));
+            DocumentField documentField;
+            if (metaField.equals(IgnoredFieldMapper.NAME)) {
+                int numValues = randomIntBetween(1, 3);
+                List<Object> ignoredFields = new ArrayList<>(numValues);
+                for (int i = 0; i < numValues; i++) {
+                    ignoredFields.add(randomAlphaOfLengthBetween(3, 10));
+                }
+                documentField = new DocumentField(metaField, ignoredFields);
+            } else {
+                // meta fields are single value only, besides _ignored
+                documentField = new DocumentField(metaField, Collections.singletonList(randomAlphaOfLengthBetween(3, 10)));
+            }
             return Tuple.tuple(documentField, documentField);
-        } else {
-            String fieldName = randomAlphaOfLengthBetween(3, 10);
-            Tuple<List<Object>, List<Object>> tuple = RandomObjects.randomStoredFieldValues(random(), xContentType);
-            DocumentField input = new DocumentField(fieldName, tuple.v1());
-            DocumentField expected = new DocumentField(fieldName, tuple.v2());
-            return Tuple.tuple(input, expected);
         }
+        String fieldName = randomAlphaOfLengthBetween(3, 10);
+        Tuple<List<Object>, List<Object>> tuple = RandomObjects.randomStoredFieldValues(random(), xContentType);
+        DocumentField input = new DocumentField(fieldName, tuple.v1());
+        DocumentField expected = new DocumentField(fieldName, tuple.v2());
+        return Tuple.tuple(input, expected);
     }
 }

@@ -90,7 +90,6 @@ public class GetResultTests extends ESTestCase {
         XContentType xContentType = randomFrom(XContentType.values());
         Tuple<GetResult, GetResult> tuple = randomGetResult(xContentType);
         GetResult getResult = tuple.v1();
-
         // We don't expect to retrieve the index/type/id of the GetResult because they are not rendered
         // by the toXContentEmbedded method.
         GetResult expectedGetResult = new GetResult(null, null, null, -1,

@@ -106,7 +105,6 @@ public class GetResultTests extends ESTestCase {
             parsedEmbeddedGetResult = GetResult.fromXContentEmbedded(parser);
             assertNull(parser.nextToken());
         }
-
         assertEquals(expectedGetResult, parsedEmbeddedGetResult);
         // print the parsed object out and test that the output is the same as the original output
         BytesReference finalBytes = toXContentEmbedded(parsedEmbeddedGetResult, xContentType, humanReadable);

@@ -203,16 +201,17 @@ public class GetResultTests extends ESTestCase {
         return Tuple.tuple(getResult, expectedGetResult);
     }

-    private static Tuple<Map<String, DocumentField>, Map<String, DocumentField>> randomDocumentFields(XContentType xContentType) {
+    public static Tuple<Map<String, DocumentField>, Map<String, DocumentField>> randomDocumentFields(XContentType xContentType) {
         int numFields = randomIntBetween(2, 10);
         Map<String, DocumentField> fields = new HashMap<>(numFields);
         Map<String, DocumentField> expectedFields = new HashMap<>(numFields);
-        for (int i = 0; i < numFields; i++) {
+        while (fields.size() < numFields) {
             Tuple<DocumentField, DocumentField> tuple = randomDocumentField(xContentType);
             DocumentField getField = tuple.v1();
             DocumentField expectedGetField = tuple.v2();
-            fields.put(getField.getName(), getField);
-            expectedFields.put(expectedGetField.getName(), expectedGetField);
+            if (fields.putIfAbsent(getField.getName(), getField) == null) {
+                assertNull(expectedFields.putIfAbsent(expectedGetField.getName(), expectedGetField));
+            }
         }
         return Tuple.tuple(fields, expectedFields);
     }

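The loop change above (a counted `for` replaced by `while (fields.size() < numFields)` plus `putIfAbsent`) guarantees exactly `numFields` distinct entries even when the random field generator repeats a name; the old version could silently end up with fewer entries by overwriting. A tiny standalone illustration of the idiom:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Random;

    public class DistinctKeysDemo {
        public static void main(String[] args) {
            Random random = new Random();
            int wanted = 5;
            Map<String, Integer> fields = new HashMap<>();
            // Iterating a fixed number of times can produce fewer entries when a
            // key repeats; iterating until the map reaches the target size cannot.
            while (fields.size() < wanted) {
                String name = "field" + random.nextInt(7); // small key space forces collisions
                fields.putIfAbsent(name, fields.size());
            }
            System.out.println(fields.size()); // always 5
        }
    }
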
@@ -74,6 +74,10 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestCase {
         enableRandomFailures = randomBoolean();
     }

+    protected void disableRandomFailures() {
+        enableRandomFailures = false;
+    }
+
     protected void failRandomly() {
         if (enableRandomFailures && rarely()) {
             throw new RuntimeException("dummy test failure");

@@ -49,6 +49,7 @@ import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.threadpool.TestThreadPool;

@@ -75,6 +76,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
+import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

@@ -97,7 +99,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
         terminate(threadPool);
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32308")
     public void testRandomClusterStateUpdates() {
         // we have an IndicesClusterStateService per node in the cluster
         final Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap = new HashMap<>();

@@ -199,6 +200,59 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndicesClusterStateServiceTestCase {
         }
     }

+    /**
+     * In rare cases it is possible that a node gets an instruction to replace a replica
+     * shard that's in POST_RECOVERY with a new initializing primary with the same allocation id.
+     * This can happen by batching cluster states that include the starting of the replica, with
+     * closing of the indices, opening it up again and allocating the primary shard to the node in
+     * question. The node should then clean its initializing replica and replace it with a new
+     * initializing primary.
+     */
+    public void testInitializingPrimaryRemovesInitializingReplicaWithSameAID() {
+        disableRandomFailures();
+        String index = "index_" + randomAlphaOfLength(8).toLowerCase(Locale.ROOT);
+        ClusterState state = ClusterStateCreationUtils.state(index, randomBoolean(),
+            ShardRoutingState.STARTED, ShardRoutingState.INITIALIZING);
+
+        // the initial state which is derived from the newly created cluster state but doesn't contain the index
+        ClusterState previousState = ClusterState.builder(state)
+            .metaData(MetaData.builder(state.metaData()).remove(index))
+            .routingTable(RoutingTable.builder().build())
+            .build();
+
+        // pick a data node that has shards assigned to it, to simulate the index-adding cluster state change event on
+        final ShardRouting shardRouting = state.routingTable().index(index).shard(0).replicaShards().get(0);
+        final ShardId shardId = shardRouting.shardId();
+        DiscoveryNode node = state.nodes().get(shardRouting.currentNodeId());
+
+        // simulate the cluster state change on the node
+        ClusterState localState = adaptClusterStateToLocalNode(state, node);
+        ClusterState previousLocalState = adaptClusterStateToLocalNode(previousState, node);
+        IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(node, RecordingIndicesService::new);
+        indicesCSSvc.start();
+        indicesCSSvc.applyClusterState(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState));
+        previousState = state;
+
+        // start the replica
+        state = cluster.applyStartedShards(state, state.routingTable().index(index).shard(0).replicaShards());
+
+        // close the index and open it up again (this will sometimes swap roles between primary and replica)
+        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metaData().index(index).getIndex().getName());
+        state = cluster.closeIndices(state, closeIndexRequest);
+        OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metaData().index(index).getIndex().getName());
+        state = cluster.openIndices(state, openIndexRequest);
+
+        localState = adaptClusterStateToLocalNode(state, node);
+        previousLocalState = adaptClusterStateToLocalNode(previousState, node);
+
+        indicesCSSvc.applyClusterState(new ClusterChangedEvent("new cluster state", localState, previousLocalState));
+
+        final MockIndexShard shardOrNull = ((RecordingIndicesService) indicesCSSvc.indicesService).getShardOrNull(shardId);
+        assertThat(shardOrNull == null ? null : shardOrNull.routingEntry(),
+            equalTo(state.getRoutingNodes().node(node.getId()).getByShardId(shardId)));
+    }
+
     public ClusterState randomInitialClusterState(Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap,
                                                   Supplier<MockIndicesService> indicesServiceSupplier) {
         List<DiscoveryNode> allNodes = new ArrayList<>();

@@ -1004,6 +1004,7 @@ public class IndexStatsIT extends ESIntegTestCase {
         assertEquals(total, shardTotal);
     }

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32506")
     public void testFilterCacheStats() throws Exception {
         assertAcked(prepareCreate("index").setSettings(Settings.builder().put(indexSettings()).put("number_of_replicas", 0).build()).get());
         indexRandom(true,

@@ -19,30 +19,6 @@
package org.elasticsearch.search;

import org.apache.lucene.search.Explanation;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchHit.NestedIdentity;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightFieldTests;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.RandomObjects;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
@@ -50,9 +26,31 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

import org.apache.lucene.search.Explanation;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.get.GetResultTests;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchHit.NestedIdentity;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightFieldTests;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.RandomObjects;

import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
@@ -63,8 +61,6 @@ import static org.hamcrest.Matchers.nullValue;

public class SearchHitTests extends ESTestCase {

    private static Set<String> META_FIELDS = Sets.newHashSet("_uid", "_parent", "_routing", "_size", "_timestamp", "_ttl");

    public static SearchHit createTestItem(boolean withOptionalInnerHits) {
        int internalId = randomInt();
        String uid = randomAlphaOfLength(10);
@@ -75,18 +71,7 @@ public class SearchHitTests extends ESTestCase {
        }
        Map<String, DocumentField> fields = new HashMap<>();
        if (randomBoolean()) {
            int size = randomIntBetween(0, 10);
            for (int i = 0; i < size; i++) {
                Tuple<List<Object>, List<Object>> values = RandomObjects.randomStoredFieldValues(random(), XContentType.JSON);
                if (randomBoolean()) {
                    String metaField = randomFrom(META_FIELDS);
                    fields.put(metaField, new DocumentField(metaField, values.v1()));
                } else {
                    String fieldName = randomAlphaOfLengthBetween(5, 10);
                    fields.put(fieldName, new DocumentField(fieldName, values.v1()));
                }
            }
            fields = GetResultTests.randomDocumentFields(XContentType.JSON).v1();
        }
        SearchHit hit = new SearchHit(internalId, uid, type, nestedIdentity, fields);
        if (frequently()) {
@@ -109,7 +94,8 @@ public class SearchHitTests extends ESTestCase {
            int size = randomIntBetween(0, 5);
            Map<String, HighlightField> highlightFields = new HashMap<>(size);
            for (int i = 0; i < size; i++) {
                highlightFields.put(randomAlphaOfLength(5), HighlightFieldTests.createTestItem());
                HighlightField testItem = HighlightFieldTests.createTestItem();
                highlightFields.put(testItem.getName(), testItem);
            }
            hit.highlightFields(highlightFields);
        }
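The change in this hunk keys highlightFields by the generated field's own name instead of an unrelated random key. Since XContent parsing rebuilds the map from each HighlightField's embedded name, a mismatched key cannot survive a serialization round trip. A tiny self-contained sketch of that failure mode, with plain strings standing in for HighlightField (all names here are hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    public class KeyNameRoundTrip {
        public static void main(String[] args) {
            // key disagrees with the value's embedded name
            Map<String, String> original = new HashMap<>();
            original.put("zzzzz", "title");

            // a parser rebuilds the map keyed by each value's embedded name
            Map<String, String> parsedBack = new HashMap<>();
            for (String name : original.values()) {
                parsedBack.put(name, name);
            }
            System.out.println(original.equals(parsedBack)); // false: the round trip broke
        }
    }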
@@ -133,9 +119,10 @@ public class SearchHitTests extends ESTestCase {
            hit.setInnerHits(innerHits);
        }
        if (randomBoolean()) {
            String index = randomAlphaOfLengthBetween(5, 10);
            String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(5, 10);
            hit.shard(new SearchShardTarget(randomAlphaOfLengthBetween(5, 10),
                new ShardId(new Index(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)), randomInt()), null,
                OriginalIndices.NONE));
                new ShardId(new Index(index, randomAlphaOfLengthBetween(5, 10)), randomInt()), clusterAlias, OriginalIndices.NONE));
        }
        return hit;
    }
@@ -46,13 +46,13 @@ public class SearchSortValuesTests extends ESTestCase {
        List<Supplier<Object>> valueSuppliers = new ArrayList<>();
        // this should reflect all values that are allowed to go through the transport layer
        valueSuppliers.add(() -> null);
        valueSuppliers.add(() -> randomInt());
        valueSuppliers.add(() -> randomLong());
        valueSuppliers.add(() -> randomDouble());
        valueSuppliers.add(() -> randomFloat());
        valueSuppliers.add(() -> randomByte());
        valueSuppliers.add(() -> randomShort());
        valueSuppliers.add(() -> randomBoolean());
        valueSuppliers.add(ESTestCase::randomInt);
        valueSuppliers.add(ESTestCase::randomLong);
        valueSuppliers.add(ESTestCase::randomDouble);
        valueSuppliers.add(ESTestCase::randomFloat);
        valueSuppliers.add(ESTestCase::randomByte);
        valueSuppliers.add(ESTestCase::randomShort);
        valueSuppliers.add(ESTestCase::randomBoolean);
        valueSuppliers.add(() -> frequently() ? randomAlphaOfLengthBetween(1, 30) : randomRealisticUnicodeOfCodepointLength(30));

        int size = randomIntBetween(1, 20);
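This hunk replaces no-argument lambdas with method references; both forms yield equivalent Supplier instances, the reference form just drops the wrapper lambda. A minimal stand-alone sketch of the equivalence, where randomInt is a hypothetical stand-in for the ESTestCase helper:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Random;
    import java.util.function.Supplier;

    public class SupplierStyles {
        private static final Random RANDOM = new Random();

        // stand-in for ESTestCase.randomInt()
        static int randomInt() {
            return RANDOM.nextInt();
        }

        public static void main(String[] args) {
            List<Supplier<Object>> suppliers = new ArrayList<>();
            suppliers.add(() -> randomInt());          // lambda form (before the change)
            suppliers.add(SupplierStyles::randomInt);  // method reference (after the change)
            suppliers.forEach(s -> System.out.println(s.get()));
        }
    }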
@@ -20,8 +20,6 @@
package org.elasticsearch.search.aggregations.bucket.histogram;

import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation;
@@ -51,14 +49,6 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
    public void setUp() throws Exception {
        super.setUp();
        format = randomNumericDocValueFormat();

        roundingInfos = new RoundingInfo[6];
        roundingInfos[0] = new RoundingInfo(Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(), 1, 5, 10, 30);
        roundingInfos[1] = new RoundingInfo(Rounding.builder(DateTimeUnit.MINUTES_OF_HOUR).build(), 1, 5, 10, 30);
        roundingInfos[2] = new RoundingInfo(Rounding.builder(DateTimeUnit.HOUR_OF_DAY).build(), 1, 3, 12);
        roundingInfos[3] = new RoundingInfo(Rounding.builder(DateTimeUnit.DAY_OF_MONTH).build(), 1, 7);
        roundingInfos[4] = new RoundingInfo(Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(), 1, 3);
        roundingInfos[5] = new RoundingInfo(Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).build(), 1, 10, 20, 50, 100);
    }

    @Override
@@ -66,6 +56,8 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
                                                          List<PipelineAggregator> pipelineAggregators,
                                                          Map<String, Object> metaData,
                                                          InternalAggregations aggregations) {

        roundingInfos = AutoDateHistogramAggregationBuilder.buildRoundings(null);
        int nbBuckets = randomNumberOfBuckets();
        int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1);
        List<InternalAutoDateHistogram.Bucket> buckets = new ArrayList<>(nbBuckets);
@@ -81,6 +73,7 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
        InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList());
        BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations);

        return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData);
    }
@@ -92,13 +85,50 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
                roundingIdx = histogram.getBucketInfo().roundingIdx;
            }
        }
        Map<Long, Long> expectedCounts = new TreeMap<>();
        for (Histogram histogram : inputs) {
        RoundingInfo roundingInfo = roundingInfos[roundingIdx];

        long lowest = Long.MAX_VALUE;
        long highest = 0;
        for (InternalAutoDateHistogram histogram : inputs) {
            for (Histogram.Bucket bucket : histogram.getBuckets()) {
                expectedCounts.compute(roundingInfos[roundingIdx].rounding.round(((DateTime) bucket.getKey()).getMillis()),
                    (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount());
                long bucketKey = ((DateTime) bucket.getKey()).getMillis();
                if (bucketKey < lowest) {
                    lowest = bucketKey;
                }
                if (bucketKey > highest) {
                    highest = bucketKey;
                }
            }
        }
        long normalizedDuration = (highest - lowest) / roundingInfo.getRoughEstimateDurationMillis();
        long innerIntervalToUse = 0;
        for (int interval : roundingInfo.innerIntervals) {
            if (normalizedDuration / interval < maxNumberOfBuckets()) {
                innerIntervalToUse = interval;
            }
        }
        Map<Long, Long> expectedCounts = new TreeMap<>();
        long intervalInMillis = innerIntervalToUse * roundingInfo.getRoughEstimateDurationMillis();
        for (long keyForBucket = roundingInfo.rounding.round(lowest);
             keyForBucket <= highest;
             keyForBucket = keyForBucket + intervalInMillis) {
            expectedCounts.put(keyForBucket, 0L);

            for (InternalAutoDateHistogram histogram : inputs) {
                for (Histogram.Bucket bucket : histogram.getBuckets()) {
                    long bucketKey = ((DateTime) bucket.getKey()).getMillis();
                    long roundedBucketKey = roundingInfo.rounding.round(bucketKey);
                    if (roundedBucketKey >= keyForBucket
                        && roundedBucketKey < keyForBucket + intervalInMillis) {
                        long count = bucket.getDocCount();
                        expectedCounts.compute(keyForBucket,
                            (key, oldValue) -> (oldValue == null ? 0 : oldValue) + count);
                    }
                }
            }
        }

        Map<Long, Long> actualCounts = new TreeMap<>();
        for (Histogram.Bucket bucket : reduced.getBuckets()) {
            actualCounts.compute(((DateTime) bucket.getKey()).getMillis(),
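The rewritten expectation above scans all input buckets for the lowest and highest keys, picks the largest inner interval that keeps the bucket count under maxNumberOfBuckets(), and pre-fills every interval between the bounds with a zero count so empty buckets are asserted too. A stripped-down sketch of that selection and zero-fill, using plain longs as hypothetical stand-ins for RoundingInfo and its rough duration estimate:

    import java.util.Map;
    import java.util.TreeMap;

    public class IntervalSelectionDemo {
        public static void main(String[] args) {
            long lowest = 0, highest = 100_000;    // observed key range, in millis
            long roughDurationMillis = 1_000;      // e.g. a one-second rounding
            int[] innerIntervals = {1, 5, 10, 30}; // candidate multiples of the rounding
            int maxBuckets = 50;                   // constants chosen so at least one interval fits

            // pick the largest inner interval that stays under the bucket limit
            long normalizedDuration = (highest - lowest) / roughDurationMillis;
            long innerIntervalToUse = 0;
            for (int interval : innerIntervals) {
                if (normalizedDuration / interval < maxBuckets) {
                    innerIntervalToUse = interval;
                }
            }

            // seed every expected bucket key with a zero count, like the test does
            Map<Long, Long> expectedCounts = new TreeMap<>();
            long intervalInMillis = innerIntervalToUse * roughDurationMillis;
            for (long key = lowest; key <= highest; key += intervalInMillis) {
                expectedCounts.put(key, 0L);
            }
            System.out.println(expectedCounts.size() + " empty buckets seeded");
        }
    }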
@@ -117,12 +147,6 @@ public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregati
        return ParsedAutoDateHistogram.class;
    }

    @Override
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32215")
    public void testReduceRandom() {
        super.testReduceRandom();
    }

    @Override
    protected InternalAutoDateHistogram mutateInstance(InternalAutoDateHistogram instance) {
        String name = instance.getName();
Some files were not shown because too many files have changed in this diff.