Merge remote-tracking branch 'es/master' into ccr

* es/master:
  Move score script context from SearchScript to its own class (#30816)
  Fix bad version check writing Repository nodes (#30846)
  [docs] explainer for java packaging tests (#30825)
  Remove Throwable usage from transport modules (#30845)
  REST high-level client: add put ingest pipeline API (#30793)
  Update the version checks around ip_range bucket keys, now that the change was backported.
  Mute IndexMasterFailoverIT.testMasterFailoverDuringIndexingWithMappingChanges
  Use geohash cell instead of just a corner in geo_bounding_box (#30698)
  Limit user to single concurrent auth per realm (#30794)
  [Tests] Move templated _rank_eval tests (#30679)
  Security: fix dynamic mapping updates with aliases (#30787)
  Ensure that ip_range aggregations always return bucket keys. (#30701)
  Use remote client in TransportFieldCapsAction (#30838)
  Move Watcher versioning setting to meta field (#30832)
  [Docs] Explain incomplete dates in range queries (#30689)
  Move persistent task registrations to core (#30755)
  Decouple ClusterStateTaskListener & ClusterApplier (#30809)
  Send client headers from TransportClient (#30803)
  Packaging: Ensure upgrade_is_oss flag file is always deleted (#30732)
  Force stable file modes for built packages (#30823)
commit e6dd4144ce
Martijn van Groningen, 2018-05-25 11:07:45 +02:00
115 changed files with 2359 additions and 840 deletions

View File

@ -11,6 +11,7 @@ import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency
import org.gradle.api.tasks.Copy
import org.gradle.api.tasks.Delete
import org.gradle.api.tasks.Exec
import org.gradle.api.tasks.StopExecutionException
import org.gradle.api.tasks.TaskState
import static java.util.Collections.unmodifiableList
@ -285,8 +286,10 @@ class VagrantTestPlugin implements Plugin<Project> {
dependsOn copyPackagingArchives
doFirst {
project.delete("${archivesDir}/upgrade_is_oss")
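// always delete a stale flag file first; it is recreated below only when applicable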
if (project.extensions.esvagrant.upgradeFromVersion.before('6.3.0')) {
throw new StopExecutionException("upgrade version is before 6.3.0")
}
}
onlyIf { project.extensions.esvagrant.upgradeFromVersion.onOrAfter('6.3.0') }
file "${archivesDir}/upgrade_is_oss"
contents ''
}

View File

@ -25,6 +25,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineResponse;
import java.io.IOException;
@ -87,4 +89,26 @@ public final class ClusterClient {
restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent,
listener, emptySet(), headers);
}
/**
* Add a pipeline or update an existing pipeline in the cluster
* <p>
* See
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html"> Put Pipeline API on elastic.co</a>
*/
public PutPipelineResponse putPipeline(PutPipelineRequest request, Header... headers) throws IOException {
return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::putPipeline,
PutPipelineResponse::fromXContent, emptySet(), headers);
}
/**
* Asynchronously add a pipeline or update an existing pipeline in the cluster
* <p>
* See
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/put-pipeline-api.html"> Put Pipeline API on elastic.co</a>
*/
public void putPipelineAsync(PutPipelineRequest request, ActionListener<PutPipelineResponse> listener, Header... headers) {
restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline,
PutPipelineResponse::fromXContent, listener, emptySet(), headers);
}
}
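For context, a minimal caller-side sketch of the new API, mirroring the documentation test added elsewhere in this commit (assumes a configured `RestHighLevelClient` named `client`; the pipeline id and source are illustrative):

String source =
    "{\"description\":\"my set of processors\"," +
    "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
PutPipelineRequest request = new PutPipelineRequest(
    "my-pipeline-id",                                        // illustrative pipeline id
    new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // pipeline definition
    XContentType.JSON);                                      // content type of the source above
PutPipelineResponse response = client.cluster().putPipeline(request);
boolean acknowledged = response.isAcknowledged();            // true once the cluster has accepted the pipeline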

View File

@ -58,6 +58,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
@ -609,6 +610,21 @@ final class RequestConverters {
return request;
}
static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOException {
String endpoint = new EndpointBuilder()
.addPathPartAsIs("_ingest/pipeline")
.addPathPart(putPipelineRequest.getId())
.build();
Request request = new Request(HttpPut.METHOD_NAME, endpoint);
Params parameters = new Params(request);
parameters.withTimeout(putPipelineRequest.timeout());
parameters.withMasterTimeout(putPipelineRequest.masterNodeTimeout());
request.setEntity(createEntity(putPipelineRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}
static Request listTasks(ListTasksRequest listTaskRequest) {
if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) {
throw new IllegalArgumentException("TaskId cannot be used for list tasks request");

View File

@ -25,12 +25,17 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineResponse;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.ingest.Pipeline;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.tasks.TaskInfo;
@ -136,4 +141,41 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
}
assertTrue("List tasks were not found", listTasksFound);
}
public void testPutPipeline() throws IOException {
String id = "some_pipeline_id";
XContentType xContentType = randomFrom(XContentType.values());
XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent());
pipelineBuilder.startObject();
{
pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors");
pipelineBuilder.startArray(Pipeline.PROCESSORS_KEY);
{
pipelineBuilder.startObject().startObject("set");
{
pipelineBuilder
.field("field", "foo")
.field("value", "bar");
}
pipelineBuilder.endObject().endObject();
pipelineBuilder.startObject().startObject("convert");
{
pipelineBuilder
.field("field", "rank")
.field("type", "integer");
}
pipelineBuilder.endObject().endObject();
}
pipelineBuilder.endArray();
}
pipelineBuilder.endObject();
PutPipelineRequest request = new PutPipelineRequest(
id,
BytesReference.bytes(pipelineBuilder),
pipelineBuilder.contentType());
PutPipelineResponse putPipelineResponse =
execute(request, highLevelClient().cluster()::putPipeline, highLevelClient().cluster()::putPipelineAsync);
assertTrue(putPipelineResponse.isAcknowledged());
}
}

View File

@ -61,6 +61,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
@ -91,6 +92,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.RandomCreateIndexGenerator;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.TermQueryBuilder;
@ -119,6 +121,7 @@ import org.elasticsearch.test.RandomObjects;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
@ -1402,6 +1405,26 @@ public class RequestConvertersTests extends ESTestCase {
assertEquals(expectedParams, expectedRequest.getParameters());
}
public void testPutPipeline() throws IOException {
String pipelineId = "some_pipeline_id";
PutPipelineRequest request = new PutPipelineRequest(
"some_pipeline_id",
new BytesArray("{}".getBytes(StandardCharsets.UTF_8)),
XContentType.JSON
);
Map<String, String> expectedParams = new HashMap<>();
setRandomMasterTimeout(request, expectedParams);
setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
Request expectedRequest = RequestConverters.putPipeline(request);
StringJoiner endpoint = new StringJoiner("/", "/", "");
endpoint.add("_ingest/pipeline");
endpoint.add(pipelineId);
assertEquals(endpoint.toString(), expectedRequest.getEndpoint());
assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod());
assertEquals(expectedParams, expectedRequest.getParameters());
}
public void testRollover() throws IOException {
RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10),
randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10));

View File

@ -21,7 +21,6 @@ package org.elasticsearch.client.documentation;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
@ -29,9 +28,12 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.TimeValue;
@ -41,6 +43,7 @@ import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -80,19 +83,19 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
// end::put-settings-request
// tag::put-settings-create-settings
String transientSettingKey =
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey();
int transientSettingValue = 10;
Settings transientSettings =
Settings.builder()
.put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES)
.build(); // <1>
String persistentSettingKey =
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey();
String persistentSettingValue =
EnableAllocationDecider.Allocation.NONE.name();
Settings persistentSettings =
Settings.builder()
.put(persistentSettingKey, persistentSettingValue)
.build(); // <2>
@ -105,9 +108,9 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
{
// tag::put-settings-settings-builder
Settings.Builder transientSettingsBuilder =
Settings.builder()
.put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES);
request.transientSettings(transientSettingsBuilder); // <1>
// end::put-settings-settings-builder
}
@ -164,7 +167,7 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
// tag::put-settings-execute-listener
ActionListener<ClusterUpdateSettingsResponse> listener =
new ActionListener<ClusterUpdateSettingsResponse>() {
@Override
public void onResponse(ClusterUpdateSettingsResponse response) {
@ -272,4 +275,80 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
public void testPutPipeline() throws IOException {
RestHighLevelClient client = highLevelClient();
{
// tag::put-pipeline-request
String source =
"{\"description\":\"my set of processors\"," +
"\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
PutPipelineRequest request = new PutPipelineRequest(
"my-pipeline-id", // <1>
new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <2>
XContentType.JSON // <3>
);
// end::put-pipeline-request
// tag::put-pipeline-request-timeout
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
request.timeout("2m"); // <2>
// end::put-pipeline-request-timeout
// tag::put-pipeline-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::put-pipeline-request-masterTimeout
// tag::put-pipeline-execute
PutPipelineResponse response = client.cluster().putPipeline(request); // <1>
// end::put-pipeline-execute
// tag::put-pipeline-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::put-pipeline-response
assertTrue(acknowledged);
}
}
public void testPutPipelineAsync() throws Exception {
RestHighLevelClient client = highLevelClient();
{
String source =
"{\"description\":\"my set of processors\"," +
"\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}";
PutPipelineRequest request = new PutPipelineRequest(
"my-pipeline-id",
new BytesArray(source.getBytes(StandardCharsets.UTF_8)),
XContentType.JSON
);
// tag::put-pipeline-execute-listener
ActionListener<PutPipelineResponse> listener =
new ActionListener<PutPipelineResponse>() {
@Override
public void onResponse(PutPipelineResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::put-pipeline-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::put-pipeline-execute-async
client.cluster().putPipelineAsync(request, listener); // <1>
// end::put-pipeline-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
}

View File

@ -242,6 +242,8 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) {
if (it.relativePath.segments[-2] == 'bin') {
// bin files, wherever they are within modules (eg platform specific) should be executable
it.mode = 0755
} else {
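// all other files should be readable but not writable or executable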
it.mode = 0644
}
}
if (oss) {

View File

@ -122,6 +122,7 @@ Closure commonPackageConfig(String type, boolean oss) {
}
from(rootProject.projectDir) {
include 'README.textile'
fileMode 0644
}
into('modules') {
with copySpec {
@ -135,6 +136,11 @@ Closure commonPackageConfig(String type, boolean oss) {
for (int i = segments.length - 2; i > 0 && segments[i] != 'modules'; --i) {
directory('/' + segments[0..i].join('/'), 0755)
}
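// force stable permissions: executables under bin get 0755, everything else 0644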
if (segments[-2] == 'bin') {
fcp.mode = 0755
} else {
fcp.mode = 0644
}
}
}
}
@ -153,6 +159,7 @@ Closure commonPackageConfig(String type, boolean oss) {
include oss ? 'APACHE-LICENSE-2.0.txt' : 'ELASTIC-LICENSE.txt'
rename { 'LICENSE.txt' }
}
fileMode 0644
}
}
@ -180,14 +187,17 @@ Closure commonPackageConfig(String type, boolean oss) {
// ========= systemd =========
into('/usr/lib/tmpfiles.d') {
from "${packagingFiles}/systemd/elasticsearch.conf"
fileMode 0644
}
into('/usr/lib/systemd/system') {
fileType CONFIG | NOREPLACE
from "${packagingFiles}/systemd/elasticsearch.service"
fileMode 0644
}
into('/usr/lib/sysctl.d') {
fileType CONFIG | NOREPLACE
from "${packagingFiles}/systemd/sysctl/elasticsearch.conf"
fileMode 0644
}
// ========= sysV init =========

View File

@ -0,0 +1,83 @@
[[java-rest-high-cluster-put-pipeline]]
=== Put Pipeline API
[[java-rest-high-cluster-put-pipeline-request]]
==== Put Pipeline Request
A `PutPipelineRequest` requires an `id` argument, a source and an `XContentType`. The source consists
of a description and a list of `Processor` objects.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request]
--------------------------------------------------
<1> The pipeline id
<2> The source for the pipeline as a `BytesArray`.
<3> The XContentType for the pipeline source supplied above.
==== Optional arguments
The following arguments can optionally be provided:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the pipeline creation as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the pipeline creation as a `String`
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`
[[java-rest-high-cluster-put-pipeline-sync]]
==== Synchronous Execution
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute]
--------------------------------------------------
<1> Execute the request and get back the response in a `PutPipelineResponse` object.
[[java-rest-high-cluster-put-pipeline-async]]
==== Asynchronous Execution
The asynchronous execution of a put pipeline request requires both the `PutPipelineRequest`
instance and an `ActionListener` instance to be passed to the asynchronous
method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute-async]
--------------------------------------------------
<1> The `PutPipelineRequest` to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.
A typical listener for `PutPipelineResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument
[[java-rest-high-cluster-put-pipeline-response]]
==== Put Pipeline Response
The returned `PutPipelineResponse` allows retrieving information about the executed
operation, as follows:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-pipeline-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request

View File

@ -105,9 +105,11 @@ The Java High Level REST Client supports the following Cluster APIs:
* <<java-rest-high-cluster-put-settings>>
* <<java-rest-high-cluster-list-tasks>>
* <<java-rest-high-cluster-put-pipeline>>
include::cluster/put_settings.asciidoc[]
include::cluster/list_tasks.asciidoc[]
include::cluster/put_pipeline.asciidoc[]
== Snapshot APIs

View File

@ -37,10 +37,12 @@ Response:
"ip_ranges": {
"buckets" : [
{
"key": "*-10.0.0.5",
"to": "10.0.0.5",
"doc_count": 10
},
{
"key": "10.0.0.5-*",
"from": "10.0.0.5",
"doc_count": 260
}

View File

@ -12,6 +12,9 @@
* Purely negative queries (only MUST_NOT clauses) now return a score of `0`
rather than `1`.
* The boundary specified using geohashes in the `geo_bounding_box` query
now includes the entire geohash cell, instead of just the geohash center.
==== Adaptive replica selection enabled by default
Adaptive replica selection has been enabled by default. If you wish to return to

View File

@ -231,6 +231,38 @@ GET /_search
--------------------------------------------------
// CONSOLE
When geohashes are used to specify the edges of the
bounding box, the geohashes are treated as rectangles. The bounding
box is defined in such a way that its top left corresponds to the top
left corner of the geohash specified in the `top_left` parameter and
its bottom right is defined as the bottom right of the geohash
specified in the `bottom_right` parameter.
In order to specify a bounding box that matches the entire area of a
geohash, the geohash can be specified in both the `top_left` and
`bottom_right` parameters:
[source,js]
--------------------------------------------------
GET /_search
{
"query": {
"geo_bounding_box" : {
"pin.location" : {
"top_left" : "dr",
"bottom_right" : "dr"
}
}
}
}
--------------------------------------------------
// CONSOLE
In this example, the geohash `dr` will produce a bounding box
query with the top left corner at `45.0,-78.75` and the bottom right
corner at `39.375,-67.5`.
[float]
==== Vertices

View File

@ -109,6 +109,12 @@ GET _search
--------------------------------------------------
// CONSOLE
Note that if the date is missing some of the year, month or day components, the
missing parts are filled in with the start of
https://en.wikipedia.org/wiki/Unix_time[unix time], which is January 1st, 1970.
This means that when specifying `dd` as the format, for example, a value like `"gte" : 10`
will translate to `1970-01-10T00:00:00.000Z`.
===== Time zone in range queries
Dates can be converted from another timezone to UTC either by specifying the

View File

@ -0,0 +1,63 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.concurrent;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;
/**
* A thread-safe completable context that allows listeners to be attached. This class relies on
* {@link CompletableFuture} for the concurrency logic. However, it does not accept {@link Throwable} as
* an exceptional result. This allows attaching listeners that only handle {@link Exception}.
*
* @param <T> the result type
*/
public class CompletableContext<T> {
private final CompletableFuture<T> completableFuture = new CompletableFuture<>();
public void addListener(BiConsumer<T, ? super Exception> listener) {
BiConsumer<T, Throwable> castThrowable = (v, t) -> {
if (t == null) {
listener.accept(v, null);
} else {
assert !(t instanceof Error) : "Cannot be error";
listener.accept(v, (Exception) t);
}
};
completableFuture.whenComplete(castThrowable);
}
public boolean isDone() {
return completableFuture.isDone();
}
public boolean isCompletedExceptionally() {
return completableFuture.isCompletedExceptionally();
}
public boolean completeExceptionally(Exception ex) {
return completableFuture.completeExceptionally(ex);
}
public boolean complete(T value) {
return completableFuture.complete(value);
}
}
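A minimal usage sketch (listener body hypothetical): because of the cast above, listeners attached here only ever see an `Exception`, never a bare `Throwable`.

CompletableContext<Void> closeContext = new CompletableContext<>();
closeContext.addListener((aVoid, e) -> {
    if (e == null) {
        // completed normally
    } else {
        // completed exceptionally; e is an Exception (Errors trip the assertion above)
    }
});
closeContext.complete(null); // invokes the listener with (null, null)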

View File

@ -33,6 +33,8 @@ publishing {
}
dependencies {
compile "org.elasticsearch:elasticsearch-core:${version}"
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"

View File

@ -28,7 +28,7 @@ public abstract class BytesWriteHandler implements ReadWriteHandler {
private static final List<FlushOperation> EMPTY_LIST = Collections.emptyList();
public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Throwable> listener) {
public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Exception> listener) {
assert message instanceof ByteBuffer[] : "This channel only supports messages that are of type: " + ByteBuffer[].class
+ ". Found type: " + message.getClass() + ".";
return new FlushReadyWrite(context, (ByteBuffer[]) message, listener);

View File

@ -19,11 +19,12 @@
package org.elasticsearch.nio;
import org.elasticsearch.common.concurrent.CompletableContext;
import java.io.IOException;
import java.nio.channels.NetworkChannel;
import java.nio.channels.SelectableChannel;
import java.nio.channels.SelectionKey;
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
@ -37,7 +38,7 @@ public abstract class ChannelContext<S extends SelectableChannel & NetworkChanne
protected final S rawChannel;
private final Consumer<Exception> exceptionHandler;
private final CompletableFuture<Void> closeContext = new CompletableFuture<>();
private final CompletableContext<Void> closeContext = new CompletableContext<>();
private volatile SelectionKey selectionKey;
ChannelContext(S rawChannel, Consumer<Exception> exceptionHandler) {
@ -81,8 +82,8 @@ public abstract class ChannelContext<S extends SelectableChannel & NetworkChanne
*
* @param listener to be called
*/
public void addCloseListener(BiConsumer<Void, Throwable> listener) {
closeContext.whenComplete(listener);
public void addCloseListener(BiConsumer<Void, Exception> listener) {
closeContext.addListener(listener);
}
public boolean isOpen() {

View File

@ -25,13 +25,13 @@ import java.util.function.BiConsumer;
public class FlushOperation {
private final BiConsumer<Void, Throwable> listener;
private final BiConsumer<Void, Exception> listener;
private final ByteBuffer[] buffers;
private final int[] offsets;
private final int length;
private int internalIndex;
public FlushOperation(ByteBuffer[] buffers, BiConsumer<Void, Throwable> listener) {
public FlushOperation(ByteBuffer[] buffers, BiConsumer<Void, Exception> listener) {
this.listener = listener;
this.buffers = buffers;
this.offsets = new int[buffers.length];
@ -44,7 +44,7 @@ public class FlushOperation {
length = offset;
}
public BiConsumer<Void, Throwable> getListener() {
public BiConsumer<Void, Exception> getListener() {
return listener;
}

View File

@ -27,7 +27,7 @@ public class FlushReadyWrite extends FlushOperation implements WriteOperation {
private final SocketChannelContext channelContext;
private final ByteBuffer[] buffers;
FlushReadyWrite(SocketChannelContext channelContext, ByteBuffer[] buffers, BiConsumer<Void, Throwable> listener) {
FlushReadyWrite(SocketChannelContext channelContext, ByteBuffer[] buffers, BiConsumer<Void, Exception> listener) {
super(buffers, listener);
this.channelContext = channelContext;
this.buffers = buffers;

View File

@ -53,7 +53,7 @@ public abstract class NioChannel {
*
* @param listener to be called at close
*/
public void addCloseListener(BiConsumer<Void, Throwable> listener) {
public void addCloseListener(BiConsumer<Void, Exception> listener) {
getContext().addCloseListener(listener);
}

View File

@ -60,7 +60,7 @@ public class NioSocketChannel extends NioChannel {
return remoteAddress;
}
public void addConnectListener(BiConsumer<Void, Throwable> listener) {
public void addConnectListener(BiConsumer<Void, Exception> listener) {
context.addConnectListener(listener);
}

View File

@ -38,7 +38,7 @@ public interface ReadWriteHandler {
* @param listener the listener to be called when the message is sent
* @return the write operation to be queued
*/
WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Throwable> listener);
WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Exception> listener);
/**
* This method is called on the event loop thread. It should serialize a write operation object to bytes

View File

@ -19,6 +19,7 @@
package org.elasticsearch.nio;
import org.elasticsearch.common.concurrent.CompletableContext;
import org.elasticsearch.nio.utils.ExceptionsHelper;
import java.io.IOException;
@ -27,7 +28,6 @@ import java.nio.channels.ClosedChannelException;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
@ -48,7 +48,7 @@ public abstract class SocketChannelContext extends ChannelContext<SocketChannel>
protected final AtomicBoolean isClosing = new AtomicBoolean(false);
private final ReadWriteHandler readWriteHandler;
private final SocketSelector selector;
private final CompletableFuture<Void> connectContext = new CompletableFuture<>();
private final CompletableContext<Void> connectContext = new CompletableContext<>();
private final LinkedList<FlushOperation> pendingFlushes = new LinkedList<>();
private boolean ioException;
private boolean peerClosed;
@ -73,8 +73,8 @@ public abstract class SocketChannelContext extends ChannelContext<SocketChannel>
return channel;
}
public void addConnectListener(BiConsumer<Void, Throwable> listener) {
connectContext.whenComplete(listener);
public void addConnectListener(BiConsumer<Void, Exception> listener) {
connectContext.addListener(listener);
}
public boolean isConnectComplete() {
@ -121,7 +121,7 @@ public abstract class SocketChannelContext extends ChannelContext<SocketChannel>
return isConnected;
}
public void sendMessage(Object message, BiConsumer<Void, Throwable> listener) {
public void sendMessage(Object message, BiConsumer<Void, Exception> listener) {
if (isClosing.get()) {
listener.accept(null, new ClosedChannelException());
return;

View File

@ -138,7 +138,7 @@ public class SocketSelector extends ESSelector {
* @param listener to be executed
* @param value to provide to listener
*/
public <V> void executeListener(BiConsumer<V, Throwable> listener, V value) {
public <V> void executeListener(BiConsumer<V, Exception> listener, V value) {
assertOnSelectorThread();
try {
listener.accept(value, null);
@ -154,7 +154,7 @@ public class SocketSelector extends ESSelector {
* @param listener to be executed
* @param exception to provide to listener
*/
public <V> void executeFailedListener(BiConsumer<V, Throwable> listener, Exception exception) {
public <V> void executeFailedListener(BiConsumer<V, Exception> listener, Exception exception) {
assertOnSelectorThread();
try {
listener.accept(null, exception);

View File

@ -27,7 +27,7 @@ import java.util.function.BiConsumer;
*/
public interface WriteOperation {
BiConsumer<Void, Throwable> getListener();
BiConsumer<Void, Exception> getListener();
SocketChannelContext getChannel();

View File

@ -45,7 +45,7 @@ public class BytesChannelContextTests extends ESTestCase {
private BytesChannelContext context;
private InboundChannelBuffer channelBuffer;
private SocketSelector selector;
private BiConsumer<Void, Throwable> listener;
private BiConsumer<Void, Exception> listener;
private int messageLength;
@Before
@ -191,7 +191,7 @@ public class BytesChannelContextTests extends ESTestCase {
public void testMultipleWritesPartialFlushes() throws IOException {
assertFalse(context.readyForFlush());
BiConsumer<Void, Throwable> listener2 = mock(BiConsumer.class);
BiConsumer<Void, Exception> listener2 = mock(BiConsumer.class);
FlushReadyWrite flushOperation1 = mock(FlushReadyWrite.class);
FlushReadyWrite flushOperation2 = mock(FlushReadyWrite.class);
when(flushOperation1.getBuffersToWrite()).thenReturn(new ByteBuffer[0]);

View File

@ -83,7 +83,7 @@ public class ChannelContextTests extends ESTestCase {
if (t == null) {
throw new AssertionError("Close should not fail");
} else {
exception.set((Exception) t);
exception.set(t);
}
});

View File

@ -31,7 +31,7 @@ import static org.mockito.Mockito.mock;
public class FlushOperationTests extends ESTestCase {
private BiConsumer<Void, Throwable> listener;
private BiConsumer<Void, Exception> listener;
@Before
@SuppressWarnings("unchecked")

View File

@ -50,7 +50,7 @@ public class SocketChannelContextTests extends ESTestCase {
private TestSocketChannelContext context;
private Consumer<Exception> exceptionHandler;
private NioSocketChannel channel;
private BiConsumer<Void, Throwable> listener;
private BiConsumer<Void, Exception> listener;
private SocketSelector selector;
private ReadWriteHandler readWriteHandler;
@ -125,7 +125,7 @@ public class SocketChannelContextTests extends ESTestCase {
if (t == null) {
throw new AssertionError("Connection should not succeed");
} else {
exception.set((Exception) t);
exception.set(t);
}
});
@ -206,7 +206,7 @@ public class SocketChannelContextTests extends ESTestCase {
ByteBuffer[] buffer = {ByteBuffer.allocate(10)};
WriteOperation writeOperation = mock(WriteOperation.class);
BiConsumer<Void, Throwable> listener2 = mock(BiConsumer.class);
BiConsumer<Void, Exception> listener2 = mock(BiConsumer.class);
when(readWriteHandler.writeToBytes(writeOperation)).thenReturn(Arrays.asList(new FlushOperation(buffer, listener),
new FlushOperation(buffer, listener2)));
context.queueWriteOperation(writeOperation);
@ -232,7 +232,7 @@ public class SocketChannelContextTests extends ESTestCase {
ByteBuffer[] buffer = {ByteBuffer.allocate(10)};
BiConsumer<Void, Throwable> listener2 = mock(BiConsumer.class);
BiConsumer<Void, Exception> listener2 = mock(BiConsumer.class);
assertFalse(context.readyForFlush());
when(channel.isOpen()).thenReturn(true);

View File

@ -50,7 +50,7 @@ public class SocketSelectorTests extends ESTestCase {
private NioSocketChannel channel;
private TestSelectionKey selectionKey;
private SocketChannelContext channelContext;
private BiConsumer<Void, Throwable> listener;
private BiConsumer<Void, Exception> listener;
private ByteBuffer[] buffers = {ByteBuffer.allocate(1)};
private Selector rawSelector;

View File

@ -23,8 +23,10 @@ import org.apache.lucene.expressions.Expression;
import org.apache.lucene.expressions.SimpleBindings;
import org.apache.lucene.expressions.js.JavascriptCompiler;
import org.apache.lucene.expressions.js.VariableContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SortField;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.common.Nullable;
@ -39,12 +41,14 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.script.ClassPermission;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.FilterScript;
import org.elasticsearch.script.ScoreScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.ScriptException;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.lookup.SearchLookup;
import java.io.IOException;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.PrivilegedAction;
@ -111,6 +115,9 @@ public class ExpressionScriptEngine extends AbstractComponent implements ScriptE
} else if (context.instanceClazz.equals(FilterScript.class)) {
FilterScript.Factory factory = (p, lookup) -> newFilterScript(expr, lookup, p);
return context.factoryClazz.cast(factory);
} else if (context.instanceClazz.equals(ScoreScript.class)) {
ScoreScript.Factory factory = (p, lookup) -> newScoreScript(expr, lookup, p);
return context.factoryClazz.cast(factory);
}
throw new IllegalArgumentException("expression engine does not know how to handle script context [" + context.name + "]");
}
@ -260,6 +267,42 @@ public class ExpressionScriptEngine extends AbstractComponent implements ScriptE
};
};
}
private ScoreScript.LeafFactory newScoreScript(Expression expr, SearchLookup lookup, @Nullable Map<String, Object> vars) {
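// Reuse the existing SearchScript machinery: build a SearchScript leaf factory and adapt it to the ScoreScript API by delegation.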
SearchScript.LeafFactory searchLeafFactory = newSearchScript(expr, lookup, vars);
return new ScoreScript.LeafFactory() {
@Override
public boolean needs_score() {
return searchLeafFactory.needs_score();
}
@Override
public ScoreScript newInstance(LeafReaderContext ctx) throws IOException {
SearchScript script = searchLeafFactory.newInstance(ctx);
return new ScoreScript(vars, lookup, ctx) {
@Override
public double execute() {
return script.runAsDouble();
}
@Override
public void setDocument(int docid) {
script.setDocument(docid);
}
@Override
public void setScorer(Scorer scorer) {
script.setScorer(scorer);
}
@Override
public double get_score() {
return script.getScore();
}
};
}
};
}
/**
* converts a ParseException at compile-time or link-time to a ScriptException

View File

@ -20,26 +20,21 @@
package org.elasticsearch.transport.netty4;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPromise;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.concurrent.CompletableContext;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TransportException;
import java.net.InetSocketAddress;
import java.nio.channels.ClosedSelectorException;
import java.util.concurrent.CompletableFuture;
public class NettyTcpChannel implements TcpChannel {
private final Channel channel;
private final String profile;
private final CompletableFuture<Void> closeContext = new CompletableFuture<>();
private final CompletableContext<Void> closeContext = new CompletableContext<>();
NettyTcpChannel(Channel channel, String profile) {
this.channel = channel;
@ -51,9 +46,9 @@ public class NettyTcpChannel implements TcpChannel {
Throwable cause = f.cause();
if (cause instanceof Error) {
Netty4Utils.maybeDie(cause);
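// maybeDie handles fatal Errors; if execution continues, wrap the Error so the close context only ever completes with an Exception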
closeContext.completeExceptionally(cause);
closeContext.completeExceptionally(new Exception(cause));
} else {
closeContext.completeExceptionally(cause);
closeContext.completeExceptionally((Exception) cause);
}
}
});
@ -71,7 +66,7 @@ public class NettyTcpChannel implements TcpChannel {
@Override
public void addCloseListener(ActionListener<Void> listener) {
closeContext.whenComplete(ActionListener.toBiConsumer(listener));
closeContext.addListener(ActionListener.toBiConsumer(listener));
}
@Override

View File

@ -30,9 +30,9 @@ import org.apache.lucene.index.Term;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.script.ScoreScript;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.SearchScript;
/**
* An example script plugin that adds a {@link ScriptEngine} implementing expert scoring.
@ -54,12 +54,12 @@ public class ExpertScriptPlugin extends Plugin implements ScriptPlugin {
@Override
public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) {
if (context.equals(SearchScript.SCRIPT_SCORE_CONTEXT) == false) {
if (context.equals(ScoreScript.CONTEXT) == false) {
throw new IllegalArgumentException(getType() + " scripts cannot be used for context [" + context.name + "]");
}
// we use the script "source" as the script identifier
if ("pure_df".equals(scriptSource)) {
SearchScript.Factory factory = (p, lookup) -> new SearchScript.LeafFactory() {
ScoreScript.Factory factory = (p, lookup) -> new ScoreScript.LeafFactory() {
final String field;
final String term;
{
@ -74,18 +74,18 @@ public class ExpertScriptPlugin extends Plugin implements ScriptPlugin {
}
@Override
public SearchScript newInstance(LeafReaderContext context) throws IOException {
public ScoreScript newInstance(LeafReaderContext context) throws IOException {
PostingsEnum postings = context.reader().postings(new Term(field, term));
if (postings == null) {
// the field and/or term don't exist in this segment, so always return 0
return new SearchScript(p, lookup, context) {
return new ScoreScript(p, lookup, context) {
@Override
public double runAsDouble() {
public double execute() {
return 0.0d;
}
};
}
return new SearchScript(p, lookup, context) {
return new ScoreScript(p, lookup, context) {
int currentDocid = -1;
@Override
public void setDocument(int docid) {
@ -100,7 +100,7 @@ public class ExpertScriptPlugin extends Plugin implements ScriptPlugin {
currentDocid = docid;
}
@Override
public double runAsDouble() {
public double execute() {
if (postings.docID() != currentDocid) {
// advance moved past the current doc, so this doc has no occurrences of the term
return 0.0d;

View File

@ -96,7 +96,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler {
}
@Override
public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Throwable> listener) {
public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Exception> listener) {
assert message instanceof NioHttpResponse : "This channel only supports messages that are of type: "
+ NioHttpResponse.class + ". Found type: " + message.getClass() + ".";
return new HttpWriteOperation(context, (NioHttpResponse) message, listener);

View File

@ -28,16 +28,16 @@ public class HttpWriteOperation implements WriteOperation {
private final SocketChannelContext channelContext;
private final NioHttpResponse response;
private final BiConsumer<Void, Throwable> listener;
private final BiConsumer<Void, Exception> listener;
HttpWriteOperation(SocketChannelContext channelContext, NioHttpResponse response, BiConsumer<Void, Throwable> listener) {
HttpWriteOperation(SocketChannelContext channelContext, NioHttpResponse response, BiConsumer<Void, Exception> listener) {
this.channelContext = channelContext;
this.response = response;
this.listener = listener;
}
@Override
public BiConsumer<Void, Throwable> getListener() {
public BiConsumer<Void, Exception> getListener() {
return listener;
}

View File

@ -36,7 +36,7 @@ import java.util.function.BiConsumer;
* complete that promise when accept is called. It delegates the normal promise methods to the underlying
* promise.
*/
public class NettyListener implements BiConsumer<Void, Throwable>, ChannelPromise {
public class NettyListener implements BiConsumer<Void, Exception>, ChannelPromise {
private final ChannelPromise promise;
@ -45,11 +45,11 @@ public class NettyListener implements BiConsumer<Void, Throwable>, ChannelPromis
}
@Override
public void accept(Void v, Throwable throwable) {
if (throwable == null) {
public void accept(Void v, Exception exception) {
if (exception == null) {
promise.setSuccess();
} else {
promise.setFailure(throwable);
promise.setFailure(exception);
}
}
@ -212,17 +212,22 @@ public class NettyListener implements BiConsumer<Void, Throwable>, ChannelPromis
return promise.unvoid();
}
public static NettyListener fromBiConsumer(BiConsumer<Void, Throwable> biConsumer, Channel channel) {
public static NettyListener fromBiConsumer(BiConsumer<Void, Exception> biConsumer, Channel channel) {
if (biConsumer instanceof NettyListener) {
return (NettyListener) biConsumer;
} else {
ChannelPromise channelPromise = channel.newPromise();
channelPromise.addListener(f -> {
if (f.cause() == null) {
Throwable cause = f.cause();
if (cause == null) {
biConsumer.accept(null, null);
} else {
ExceptionsHelper.dieOnError(f.cause());
biConsumer.accept(null, f.cause());
if (cause instanceof Error) {
ExceptionsHelper.dieOnError(cause);
biConsumer.accept(null, new Exception(cause));
} else {
biConsumer.accept(null, (Exception) cause);
}
}
});

View File

@ -120,7 +120,7 @@ public class NioHttpChannel extends AbstractRestChannel {
toClose.add(nioChannel::close);
}
BiConsumer<Void, Throwable> listener = (aVoid, throwable) -> Releasables.close(toClose);
BiConsumer<Void, Exception> listener = (aVoid, ex) -> Releasables.close(toClose);
nioChannel.getContext().sendMessage(new NioHttpResponse(sequence, resp), listener);
success = true;
} finally {

View File

@ -31,6 +31,3 @@ dependencies {
* and will be fixed later.
* Tracked by https://github.com/elastic/elasticsearch/issues/30628
*/
if ("zip".equals(integTestCluster.distribution)) {
integTestRunner.enabled = false
}

View File

@ -1,159 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.rankeval;
import org.elasticsearch.index.rankeval.RankEvalSpec.ScriptWithId;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.test.ESIntegTestCase;
import org.junit.Before;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class SmokeMultipleTemplatesIT extends ESIntegTestCase {
private static final String MATCH_TEMPLATE = "match_template";
@Override
protected Collection<Class<? extends Plugin>> transportClientPlugins() {
return Arrays.asList(RankEvalPlugin.class);
}
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(RankEvalPlugin.class);
}
@Before
public void setup() {
createIndex("test");
ensureGreen();
client().prepareIndex("test", "testtype").setId("1")
.setSource("text", "berlin", "title", "Berlin, Germany").get();
client().prepareIndex("test", "testtype").setId("2")
.setSource("text", "amsterdam").get();
client().prepareIndex("test", "testtype").setId("3")
.setSource("text", "amsterdam").get();
client().prepareIndex("test", "testtype").setId("4")
.setSource("text", "amsterdam").get();
client().prepareIndex("test", "testtype").setId("5")
.setSource("text", "amsterdam").get();
client().prepareIndex("test", "testtype").setId("6")
.setSource("text", "amsterdam").get();
refresh();
}
public void testPrecisionAtRequest() throws IOException {
List<RatedRequest> specifications = new ArrayList<>();
Map<String, Object> ams_params = new HashMap<>();
ams_params.put("querystring", "amsterdam");
RatedRequest amsterdamRequest = new RatedRequest(
"amsterdam_query", createRelevant("2", "3", "4", "5"), ams_params, MATCH_TEMPLATE);
specifications.add(amsterdamRequest);
Map<String, Object> berlin_params = new HashMap<>();
berlin_params.put("querystring", "berlin");
RatedRequest berlinRequest = new RatedRequest(
"berlin_query", createRelevant("1"), berlin_params, MATCH_TEMPLATE);
specifications.add(berlinRequest);
PrecisionAtK metric = new PrecisionAtK();
ScriptWithId template =
new ScriptWithId(
MATCH_TEMPLATE,
new Script(
ScriptType.INLINE,
"mustache", "{\"query\": {\"match\": {\"text\": \"{{querystring}}\"}}}",
new HashMap<>()));
Set<ScriptWithId> templates = new HashSet<>();
templates.add(template);
RankEvalSpec task = new RankEvalSpec(specifications, metric, templates);
RankEvalRequestBuilder builder = new RankEvalRequestBuilder(client(), RankEvalAction.INSTANCE, new RankEvalRequest());
builder.setRankEvalSpec(task);
RankEvalResponse response = client().execute(RankEvalAction.INSTANCE, builder.request().indices("test")).actionGet();
assertEquals(0.9, response.getEvaluationResult(), Double.MIN_VALUE);
}
public void testTemplateWithAggsFails() {
String template = "{ \"aggs\" : { \"avg_grade\" : { \"avg\" : { \"field\" : \"grade\" }}}}";
assertTemplatedRequestFailures(template, "Query in rated requests should not contain aggregations.");
}
public void testTemplateWithSuggestFails() {
String template = "{\"suggest\" : {\"my-suggestion\" : {\"text\" : \"Elastic\",\"term\" : {\"field\" : \"message\"}}}}";
assertTemplatedRequestFailures(template, "Query in rated requests should not contain a suggest section.");
}
public void testTemplateWithHighlighterFails() {
String template = "{\"highlight\" : { \"fields\" : {\"content\" : {}}}}";
assertTemplatedRequestFailures(template, "Query in rated requests should not contain a highlighter section.");
}
public void testTemplateWithProfileFails() {
String template = "{\"profile\" : \"true\" }";
assertTemplatedRequestFailures(template, "Query in rated requests should not use profile.");
}
public void testTemplateWithExplainFails() {
String template = "{\"explain\" : \"true\" }";
assertTemplatedRequestFailures(template, "Query in rated requests should not use explain.");
}
private static void assertTemplatedRequestFailures(String template, String expectedMessage) {
List<RatedDocument> ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1));
RatedRequest ratedRequest = new RatedRequest("id", ratedDocs, Collections.singletonMap("param1", "value1"), "templateId");
Collection<ScriptWithId> templates = Collections.singletonList(new ScriptWithId("templateId",
new Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, template, Collections.emptyMap())));
RankEvalSpec rankEvalSpec = new RankEvalSpec(Collections.singletonList(ratedRequest), new PrecisionAtK(), templates);
RankEvalRequest rankEvalRequest = new RankEvalRequest(rankEvalSpec, new String[] { "test" });
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> client().execute(RankEvalAction.INSTANCE, rankEvalRequest).actionGet());
assertEquals(expectedMessage, e.getMessage());
}
private static List<RatedDocument> createRelevant(String... docs) {
List<RatedDocument> relevant = new ArrayList<>();
for (String doc : docs) {
relevant.add(new RatedDocument("test", doc, Rating.RELEVANT.ordinal()));
}
return relevant;
}
public enum Rating {
IRRELEVANT, RELEVANT;
}
}

View File

@ -0,0 +1,171 @@
setup:
- do:
indices.create:
index: test
body:
settings:
index:
number_of_shards: 1
- do:
index:
index: test
type: _doc
id: 1
body: { "text": "berlin", "title" : "Berlin, Germany" }
- do:
index:
index: test
type: _doc
id: 2
body: { "text": "amsterdam" }
- do:
index:
index: test
type: _doc
id: 3
body: { "text": "amsterdam" }
- do:
index:
index: test
type: _doc
id: 4
body: { "text": "amsterdam" }
- do:
index:
index: test
type: _doc
id: 5
body: { "text": "amsterdam" }
- do:
index:
index: test
type: _doc
id: 6
body: { "text": "amsterdam" }
- do:
indices.refresh: {}
---
"Basic rank-eval request with template":
- skip:
version: " - 6.1.99"
reason: the ranking evaluation feature is available since 6.2
- do:
rank_eval:
body: {
"templates": [ { "id": "match", "template": {"source": "{\"query\": { \"match\" : {\"text\" : \"{{query_string}}\" }}}" }} ],
"requests" : [
{
"id": "amsterdam_query",
"params": { "query_string": "amsterdam" },
"template_id": "match",
"ratings": [
{"_index": "test", "_id": "2", "rating": 1},
{"_index": "test", "_id": "3", "rating": 1},
{"_index": "test", "_id": "4", "rating": 1},
{"_index": "test", "_id": "5", "rating": 1},]
},
{
"id" : "berlin_query",
"params": { "query_string": "berlin" },
"template_id": "match",
"ratings": [{"_index": "test", "_id": "1", "rating": 1}]
}
],
"metric" : { "precision": { }}
}
- match: {quality_level: 0.9}
- match: {details.amsterdam_query.unknown_docs.0._id: "6"}
---
"Test illegal request parts":
- do:
catch: /Query in rated requests should not contain aggregations./
rank_eval:
body: {
"templates": [ { "id": "match", "template": {"source": "{ \"aggs\" : { \"avg_grade\" : { \"avg\" : { \"field\" : \"grade\" }}}}" }} ],
"requests" : [
{
"id": "amsterdam_query",
"params": { "query_string": "amsterdam" },
"template_id": "match",
"ratings": []
}
],
"metric" : { "precision": { }}
}
- do:
catch: /Query in rated requests should not contain a suggest section./
rank_eval:
body: {
"templates": [ { "id": "match", "template": {"source": "{\"suggest\" : {\"my-suggestion\" : {\"text\" : \"Elastic\",\"term\" : {\"field\" : \"message\"}}}}" }} ],
"requests" : [
{
"id": "amsterdam_query",
"params": { "query_string": "amsterdam" },
"template_id": "match",
"ratings": []
}
],
"metric" : { "precision": { }}
}
- do:
catch: /Query in rated requests should not contain a highlighter section./
rank_eval:
body: {
"templates": [ { "id": "match", "template": {"source": "{\"highlight\" : { \"fields\" : {\"content\" : {}}}}" }} ],
"requests" : [
{
"id": "amsterdam_query",
"params": { "query_string": "amsterdam" },
"template_id": "match",
"ratings": []
}
],
"metric" : { "precision": { }}
}
- do:
catch: /Query in rated requests should not use profile./
rank_eval:
body: {
"templates": [ { "id": "match", "template": {"source": "{\"profile\" : \"true\" }" }} ],
"requests" : [
{
"id": "amsterdam_query",
"params": { "query_string": "amsterdam" },
"template_id": "match",
"ratings": []
}
],
"metric" : { "precision": { }}
}
- do:
catch: /Query in rated requests should not use explain./
rank_eval:
body: {
"templates": [ { "id": "match", "template": {"source": "{\"explain\" : \"true\" }" }} ],
"requests" : [
{
"id": "amsterdam_query",
"params": { "query_string": "amsterdam" },
"template_id": "match",
"ratings": []
}
],
"metric" : { "precision": { }}
}

View File

@ -1,72 +0,0 @@
---
"Template request":
- skip:
version: " - 6.1.99"
reason: the ranking evaluation feature is available since 6.2
- do:
indices.create:
index: foo
body:
settings:
index:
number_of_shards: 1
- do:
index:
index: foo
type: bar
id: doc1
body: { "text": "berlin" }
- do:
index:
index: foo
type: bar
id: doc2
body: { "text": "amsterdam" }
- do:
index:
index: foo
type: bar
id: doc3
body: { "text": "amsterdam" }
- do:
index:
index: foo
type: bar
id: doc4
body: { "text": "something about amsterdam and berlin" }
- do:
indices.refresh: {}
- do:
rank_eval:
body: {
"templates": [ { "id": "match", "template": {"source": "{\"query\": { \"match\" : {\"text\" : \"{{query_string}}\" }}}" }} ],
"requests" : [
{
"id": "amsterdam_query",
"params": { "query_string": "amsterdam" },
"template_id": "match",
"ratings": [
{"_index": "foo", "_id": "doc1", "rating": 0},
{"_index": "foo", "_id": "doc2", "rating": 1},
{"_index": "foo", "_id": "doc3", "rating": 1}]
},
{
"id" : "berlin_query",
"params": { "query_string": "berlin" },
"template_id": "match",
"ratings": [{"_index": "foo", "_id": "doc1", "rating": 1}]
}
],
"metric" : { "precision": { }}
}
- match: {quality_level: 0.5833333333333333}
- match: {details.berlin_query.unknown_docs.0._id: "doc4"}
- match: {details.amsterdam_query.unknown_docs.0._id: "doc4"}

qa/vagrant/README.md

@ -0,0 +1,119 @@
# packaging tests
This project contains tests that verify the distributions we build work
correctly on the operating systems we support. They're intended to cover the
steps a user would take when installing and configuring an Elasticsearch
distribution. They're not intended to have significant coverage of the behavior
of Elasticsearch's features.
There are two types of tests in this project. The old tests live in
`src/test/` and are written in [Bats](https://github.com/sstephenson/bats),
a Bash-based framework for writing tests as shell scripts. These tests are
deprecated because Bats is unmaintained and cannot run on Windows.
The new tests live in `src/main/` and are written in Java. Like the old tests,
this project's tests are run inside the VM, not on your host. All new packaging
tests should be added to this set of tests if possible.
## Running these tests
See the section in [TESTING.asciidoc](../../TESTING.asciidoc#testing-packaging)
## Adding a new test class
When Gradle runs the packaging tests on a VM, it runs the full suite by
default. To add a test class to the suite, add its `class` to the
`@SuiteClasses` annotation in [PackagingTests.java](src/main/java/org/elasticsearch/packaging/PackagingTests.java).
If a test class is added to the project but not to this annotation, it will not
run in CI jobs. The test classes are run in the order they are listed in the
annotation, as sketched below.
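A hedged sketch of what that annotation looks like; the runner and test class
names here are hypothetical, the real list lives in `PackagingTests.java`:
```java
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.junit.runners.Suite.SuiteClasses;

@RunWith(Suite.class)
@SuiteClasses({
    DefaultTarTests.class, // hypothetical existing test class
    MyNewTests.class       // your new class, appended so it runs last
})
public class PackagingTests {}
```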
## Choosing which distributions to test
Distributions are represented by [enum values](src/main/java/org/elasticsearch/packaging/util/Distribution.java)
which know if they are compatible with the platform the tests are currently
running on. To skip a test if the distribution it's using isn't compatible with
the current platform, put this [assumption](https://github.com/junit-team/junit4/wiki/assumptions-with-assume)
in your test method or in a `@Before` method:
```java
assumeTrue(distribution.packaging.compatible);
```
Similarly, if you write a test that is intended only for particular platforms,
you can make an assumption using the constants and methods in [Platforms.java](src/main/java/org/elasticsearch/packaging/util/Platforms.java):
```java
assumeTrue("only run on windows", Platforms.WINDOWS);
assumeTrue("only run if using systemd", Platforms.isSystemd());
```
## Writing a test that covers multiple distributions
The most straightforward way to run and reproduce specific test cases is to
create a test case class with an abstract method that provides the
distribution:
```java
public abstract class MyTestCase {
@Test
public void myTest() { /* do something with the value of #distribution() */ }
abstract Distribution distribution();
}
```
and then, for each distribution you want to test, create a subclass:
```java
public class MyTestDefaultTar extends MyTestCase {
@Override
Distribution distribution() { return Distribution.DEFAULT_TAR; }
}
```
That way, when a test fails, the user is told explicitly that `MyTestDefaultTar`
failed and can run that class to reproduce it. See [ArchiveTestCase](src/main/java/org/elasticsearch/packaging/test/ArchiveTestCase.java)
and its children for an example of this.
## Running external commands
In general it's probably best to avoid running external commands when a good
Java alternative exists. For example, most filesystem operations can be done
with the `java.nio.file` APIs. For those that can't, use an instance of [Shell](src/main/java/org/elasticsearch/packaging/util/Shell.java).
Despite the name, commands run with this class are not run in a shell, and any
familiar features of shells like variables or expansion won't work.
If you do need the shell, you must explicitly invoke the shell's command. For
example, to run a command with Bash, use the `bash -c command` syntax. Note that
the entire script must be in a single string argument:
```java
Shell sh = new Shell();
sh.run("bash", "-c", "echo $foo; echo $bar");
```
Similarly for PowerShell: again, the entire PowerShell script must go in a
single string argument:
```java
sh.run("powershell.exe", "-Command", "Write-Host $foo; Write-Host $bar");
```
On Linux, most commands you'll want to use will be executable files and will
work fine without a shell:
```java
sh.run("tar", "-xzpf", "elasticsearch-6.1.0.tar.gz");
```
On Windows you'll mostly want to use PowerShell, as it can do a lot more and
gives much better feedback than Windows' legacy command line. Unfortunately that
means you'll need to use the `powershell.exe -Command` syntax, as
PowerShell's [Cmdlets](https://msdn.microsoft.com/en-us/library/ms714395.aspx)
don't correspond to executable files and are not runnable by `Runtime` directly.
When writing PowerShell commands this way, make sure to test them, as some kinds
of formatting can cause PowerShell to return a successful exit code without
running anything.


@ -31,8 +31,7 @@ import org.elasticsearch.packaging.util.Installation;
import static org.elasticsearch.packaging.util.Cleanup.cleanEverything;
import static org.elasticsearch.packaging.util.Archives.installArchive;
import static org.elasticsearch.packaging.util.Archives.verifyArchiveInstallation;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assume.assumeThat;
import static org.junit.Assume.assumeTrue;
/**
* Tests that apply to the archive distributions (tar, zip). To add a case for a distribution, subclass and
@ -54,7 +53,7 @@ public abstract class ArchiveTestCase {
@Before
public void onlyCompatibleDistributions() {
assumeThat(distribution().packaging.compatible, is(true));
assumeTrue("only compatible distributions", distribution().packaging.compatible);
}
@Test


@ -144,25 +144,18 @@ setup:
- length: { aggregations.ip_range.buckets: 3 }
# ip_range does not automatically add keys to buckets, see #21045
# - match: { aggregations.ip_range.buckets.0.key: "*-192.168.0.0" }
- is_false: aggregations.ip_range.buckets.0.from
- match: { aggregations.ip_range.buckets.0.to: "192.168.0.0" }
- match: { aggregations.ip_range.buckets.0.doc_count: 1 }
# - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0-192.169.0.0" }
- match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" }
- match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" }
- match: { aggregations.ip_range.buckets.1.doc_count: 2 }
# - match: { aggregations.ip_range.buckets.2.key: "192.169.0.0-*" }
- match: { aggregations.ip_range.buckets.2.from: "192.169.0.0" }
- is_false: aggregations.ip_range.buckets.2.to
@ -177,24 +170,18 @@ setup:
- length: { aggregations.ip_range.buckets: 3 }
# - match: { aggregations.ip_range.buckets.0.key: "*-192.168.0.0" }
- is_false: aggregations.ip_range.buckets.0.from
- match: { aggregations.ip_range.buckets.0.to: "192.168.0.0" }
- match: { aggregations.ip_range.buckets.0.doc_count: 1 }
# - match: { aggregations.ip_range.buckets.1.key: "192.168.0.0-192.169.0.0" }
- match: { aggregations.ip_range.buckets.1.from: "192.168.0.0" }
- match: { aggregations.ip_range.buckets.1.to: "192.169.0.0" }
- match: { aggregations.ip_range.buckets.1.doc_count: 2 }
# - match: { aggregations.ip_range.buckets.2.key: "192.169.0.0-*" }
- match: { aggregations.ip_range.buckets.2.from: "192.169.0.0" }
- is_false: aggregations.ip_range.buckets.2.to
@ -223,6 +210,21 @@ setup:
- match: { aggregations.ip_range.buckets.1.doc_count: 2 }
---
"IP Range Key Generation":
- skip:
version: " - 6.3.99"
reason: "Before 6.4.0, ip_range did not always generate bucket keys (see #21045)."
- do:
search:
body: { "size" : 0, "aggs" : { "ip_range" : { "ip_range" : { "field" : "ip", "ranges": [ { "to": "192.168.0.0" }, { "from": "192.168.0.0", "to": "192.169.0.0" }, { "from": "192.169.0.0" } ] } } } }
- length: { aggregations.ip_range.buckets: 3 }
- match: { aggregations.ip_range.buckets.0.key: "*-192.168.0.0" }
- match: { aggregations.ip_range.buckets.1.key: "192.168.0.0-192.169.0.0" }
- match: { aggregations.ip_range.buckets.2.key: "192.169.0.0-*" }
---
"Date range":
- do:


@ -51,9 +51,6 @@ setup:
---
"Verify created repository":
- skip:
version: " - 6.99.99"
reason: AwaitsFix for https://github.com/elastic/elasticsearch/issues/30807
- do:
snapshot.verify_repository:
repository: test_repo_get_2


@ -90,18 +90,12 @@ public interface ActionListener<Response> {
* @param <Response> the type of the response
* @return a bi consumer that will complete the wrapped listener
*/
static <Response> BiConsumer<Response, Throwable> toBiConsumer(ActionListener<Response> listener) {
static <Response> BiConsumer<Response, Exception> toBiConsumer(ActionListener<Response> listener) {
return (response, throwable) -> {
if (throwable == null) {
listener.onResponse(response);
} else {
if (throwable instanceof Exception) {
listener.onFailure((Exception) throwable);
} else if (throwable instanceof Error) {
throw (Error) throwable;
} else {
throw new AssertionError("Should have been either Error or Exception", throwable);
}
listener.onFailure(throwable);
}
};
}


@ -151,7 +151,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (Version.CURRENT.onOrAfter(Version.V_7_0_0_alpha1)) {
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
out.writeList(getNodes());
} else {
clusterName.writeTo(out);


@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
@ -33,10 +34,6 @@ import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteClusterAware;
import org.elasticsearch.transport.RemoteClusterService;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
@ -49,7 +46,6 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
private final ClusterService clusterService;
private final TransportFieldCapabilitiesIndexAction shardAction;
private final RemoteClusterService remoteClusterService;
private final TransportService transportService;
@Inject
public TransportFieldCapabilitiesAction(Settings settings, TransportService transportService,
@ -62,7 +58,6 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
actionFilters, indexNameExpressionResolver, FieldCapabilitiesRequest::new);
this.clusterService = clusterService;
this.remoteClusterService = transportService.getRemoteClusterService();
this.transportService = transportService;
this.shardAction = shardAction;
}
@ -118,47 +113,20 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
for (Map.Entry<String, OriginalIndices> remoteIndices : remoteClusterIndices.entrySet()) {
String clusterAlias = remoteIndices.getKey();
OriginalIndices originalIndices = remoteIndices.getValue();
// if we are connected this is basically a no-op, if we are not we try to connect in parallel in a non-blocking fashion
remoteClusterService.ensureConnected(clusterAlias, ActionListener.wrap(v -> {
Transport.Connection connection = remoteClusterService.getConnection(clusterAlias);
FieldCapabilitiesRequest remoteRequest = new FieldCapabilitiesRequest();
remoteRequest.setMergeResults(false); // we need to merge on this node
remoteRequest.indicesOptions(originalIndices.indicesOptions());
remoteRequest.indices(originalIndices.indices());
remoteRequest.fields(request.fields());
transportService.sendRequest(connection, FieldCapabilitiesAction.NAME, remoteRequest, TransportRequestOptions.EMPTY,
new TransportResponseHandler<FieldCapabilitiesResponse>() {
@Override
public FieldCapabilitiesResponse newInstance() {
return new FieldCapabilitiesResponse();
}
@Override
public void handleResponse(FieldCapabilitiesResponse response) {
try {
for (FieldCapabilitiesIndexResponse res : response.getIndexResponses()) {
indexResponses.add(new FieldCapabilitiesIndexResponse(RemoteClusterAware.
buildRemoteIndexName(clusterAlias, res.getIndexName()), res.get()));
}
} finally {
onResponse.run();
}
}
@Override
public void handleException(TransportException exp) {
onResponse.run();
}
@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
}, e -> onResponse.run()));
Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias);
FieldCapabilitiesRequest remoteRequest = new FieldCapabilitiesRequest();
remoteRequest.setMergeResults(false); // we need to merge on this node
remoteRequest.indicesOptions(originalIndices.indicesOptions());
remoteRequest.indices(originalIndices.indices());
remoteRequest.fields(request.fields());
remoteClusterClient.fieldCaps(remoteRequest, ActionListener.wrap(response -> {
for (FieldCapabilitiesIndexResponse res : response.getIndexResponses()) {
indexResponses.add(new FieldCapabilitiesIndexResponse(RemoteClusterAware.
buildRemoteIndexName(clusterAlias, res.getIndexName()), res.get()));
}
onResponse.run();
}, failure -> onResponse.run()));
}
}
}


@ -25,13 +25,15 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.Objects;
public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest> {
public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest> implements ToXContentObject {
private String id;
private BytesReference source;
@ -96,4 +98,14 @@ public class PutPipelineRequest extends AcknowledgedRequest<PutPipelineRequest>
out.writeEnum(xContentType);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (source != null) {
builder.rawValue(source.streamInput(), xContentType);
} else {
builder.startObject().endObject();
}
return builder;
}
}
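As a hedged usage sketch (the pipeline id and JSON body are illustrative), a
request can be built from a raw JSON body and will serialize itself via the new
`toXContent`:
```java
// Illustrative only: an empty processor list with a description.
BytesReference source = new BytesArray(
    "{\"description\":\"illustrative pipeline\",\"processors\":[]}");
PutPipelineRequest request = new PutPipelineRequest("my-pipeline", source, XContentType.JSON);
// toXContent emits the raw source, or an empty object when no source is set.
```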


@ -0,0 +1,62 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
public class PutPipelineResponse extends AcknowledgedResponse implements ToXContentObject {
private static final ConstructingObjectParser<PutPipelineResponse, Void> PARSER = new ConstructingObjectParser<>(
"cluster_put_pipeline", true, args -> new PutPipelineResponse((boolean) args[0]));
static {
declareAcknowledgedField(PARSER);
}
public PutPipelineResponse() {
}
public PutPipelineResponse(boolean acknowledged) {
super(acknowledged);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
}
public static PutPipelineResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
}


@ -19,6 +19,7 @@
package org.elasticsearch.client.transport;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
@ -129,7 +130,8 @@ public abstract class TransportClient extends AbstractClient {
providedSettings = Settings.builder().put(providedSettings).put(Node.NODE_NAME_SETTING.getKey(), "_client_").build();
}
final PluginsService pluginsService = newPluginService(providedSettings, plugins);
final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).build();
final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).put(ThreadContext.PREFIX
+ "." + "transport_client", true).build();
final List<Closeable> resourcesToClose = new ArrayList<>();
final ThreadPool threadPool = new ThreadPool(settings);
resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));


@ -69,8 +69,11 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.ingest.IngestMetadata;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksNodeService;
import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.script.ScriptMetaData;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskResultsService;
import java.util.ArrayList;
@ -140,6 +143,10 @@ public class ClusterModule extends AbstractModule {
registerMetaDataCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom);
registerMetaDataCustom(entries, ScriptMetaData.TYPE, ScriptMetaData::new, ScriptMetaData::readDiffFrom);
registerMetaDataCustom(entries, IndexGraveyard.TYPE, IndexGraveyard::new, IndexGraveyard::readDiffFrom);
registerMetaDataCustom(entries, PersistentTasksCustomMetaData.TYPE, PersistentTasksCustomMetaData::new,
PersistentTasksCustomMetaData::readDiffFrom);
// Task Status (not Diffable)
entries.add(new Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new));
return entries;
}
@ -154,6 +161,8 @@ public class ClusterModule extends AbstractModule {
ScriptMetaData::fromXContent));
entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(IndexGraveyard.TYPE),
IndexGraveyard::fromXContent));
entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE),
PersistentTasksCustomMetaData::fromXContent));
return entries;
}


@ -20,7 +20,6 @@
package org.elasticsearch.cluster.service;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import java.util.function.Supplier;
@ -38,11 +37,29 @@ public interface ClusterApplier {
* @param clusterStateSupplier the cluster state supplier which provides the latest cluster state to apply
* @param listener callback that is invoked after cluster state is applied
*/
void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ClusterStateTaskListener listener);
void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ClusterApplyListener listener);
/**
* Creates a new cluster state builder that is initialized with the cluster name and all initial cluster state customs.
*/
ClusterState.Builder newClusterStateBuilder();
/**
* Listener for results of cluster state application
*/
interface ClusterApplyListener {
/**
* Called on successful cluster state application
* @param source information where the cluster state came from
*/
default void onSuccess(String source) {
}
/**
* Called on failure during cluster state application
* @param source information where the cluster state came from
* @param e exception that occurred
*/
void onFailure(String source, Exception e);
}
}
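A hedged sketch of implementing the new `ClusterApplyListener`, mirroring how
callers below adapt to it (the latch-based waiting is illustrative):
```java
CountDownLatch latch = new CountDownLatch(1);
ClusterApplyListener listener = new ClusterApplyListener() {
    @Override
    public void onSuccess(String source) {
        latch.countDown(); // state applied, release the waiter
    }

    @Override
    public void onFailure(String source, Exception e) {
        latch.countDown(); // fail fast rather than hang the waiter
        e.printStackTrace();
    }
};
```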


@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.TimeoutClusterStateListener;
@ -141,10 +140,10 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
}
class UpdateTask extends SourcePrioritizedRunnable implements Function<ClusterState, ClusterState> {
final ClusterStateTaskListener listener;
final ClusterApplyListener listener;
final Function<ClusterState, ClusterState> updateFunction;
UpdateTask(Priority priority, String source, ClusterStateTaskListener listener,
UpdateTask(Priority priority, String source, ClusterApplyListener listener,
Function<ClusterState, ClusterState> updateFunction) {
super(priority, source);
this.listener = listener;
@ -301,7 +300,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
}
public void runOnApplierThread(final String source, Consumer<ClusterState> clusterStateConsumer,
final ClusterStateTaskListener listener, Priority priority) {
final ClusterApplyListener listener, Priority priority) {
submitStateUpdateTask(source, ClusterStateTaskConfig.build(priority),
(clusterState) -> {
clusterStateConsumer.accept(clusterState);
@ -311,13 +310,13 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
}
public void runOnApplierThread(final String source, Consumer<ClusterState> clusterStateConsumer,
final ClusterStateTaskListener listener) {
final ClusterApplyListener listener) {
runOnApplierThread(source, clusterStateConsumer, listener, Priority.HIGH);
}
@Override
public void onNewClusterState(final String source, final Supplier<ClusterState> clusterStateSupplier,
final ClusterStateTaskListener listener) {
final ClusterApplyListener listener) {
Function<ClusterState, ClusterState> applyFunction = currentState -> {
ClusterState nextState = clusterStateSupplier.get();
if (nextState != null) {
@ -331,12 +330,12 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
private void submitStateUpdateTask(final String source, final ClusterStateTaskConfig config,
final Function<ClusterState, ClusterState> executor,
final ClusterStateTaskListener listener) {
final ClusterApplyListener listener) {
if (!lifecycle.started()) {
return;
}
try {
UpdateTask updateTask = new UpdateTask(config.priority(), source, new SafeClusterStateTaskListener(listener, logger), executor);
UpdateTask updateTask = new UpdateTask(config.priority(), source, new SafeClusterApplyListener(listener, logger), executor);
if (config.timeout() != null) {
threadPoolExecutor.execute(updateTask, config.timeout(),
() -> threadPool.generic().execute(
@ -417,7 +416,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
}
if (previousClusterState == newClusterState) {
task.listener.clusterStateProcessed(task.source, newClusterState, newClusterState);
task.listener.onSuccess(task.source);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(currentTimeInNanos() - startTimeNS)));
logger.debug("processing [{}]: took [{}] no change in cluster state", task.source, executionTime);
warnAboutSlowTaskIfNeeded(executionTime, task.source);
@ -486,7 +485,7 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
callClusterStateListeners(clusterChangedEvent);
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
task.listener.onSuccess(task.source);
}
private void callClusterStateAppliers(ClusterChangedEvent clusterChangedEvent) {
@ -511,11 +510,11 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
});
}
private static class SafeClusterStateTaskListener implements ClusterStateTaskListener {
private final ClusterStateTaskListener listener;
private static class SafeClusterApplyListener implements ClusterApplyListener {
private final ClusterApplyListener listener;
private final Logger logger;
SafeClusterStateTaskListener(ClusterStateTaskListener listener, Logger logger) {
SafeClusterApplyListener(ClusterApplyListener listener, Logger logger) {
this.listener = listener;
this.logger = logger;
}
@ -532,14 +531,12 @@ public class ClusterApplierService extends AbstractLifecycleComponent implements
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
try {
listener.clusterStateProcessed(source, oldState, newState);
listener.onSuccess(source);
} catch (Exception e) {
logger.error(new ParameterizedMessage(
"exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" +
"{}\nnew cluster state:\n{}",
source, oldState, newState), e);
"exception thrown by listener while notifying of cluster state processed from [{}]", source), e);
}
}
}


@ -68,6 +68,7 @@ import java.util.function.ToLongBiFunction;
* @param <V> The type of the values
*/
public class Cache<K, V> {
// positive if entries have an expiration
private long expireAfterAccessNanos = -1;
@ -282,6 +283,39 @@ public class Cache<K, V> {
}
}
/**
* remove an entry from the segment iff the future is done and the value is equal to the
* expected value
*
* @param key the key of the entry to remove from the cache
* @param value the value expected to be associated with the key
* @param onRemoval a callback for the removed entry
*/
void remove(K key, V value, Consumer<CompletableFuture<Entry<K, V>>> onRemoval) {
CompletableFuture<Entry<K, V>> future;
boolean removed = false;
try (ReleasableLock ignored = writeLock.acquire()) {
future = map.get(key);
try {
if (future != null) {
if (future.isDone()) {
Entry<K, V> entry = future.get();
if (Objects.equals(value, entry.value)) {
removed = map.remove(key, future);
}
}
}
} catch (ExecutionException | InterruptedException e) {
throw new IllegalStateException(e);
}
}
if (future != null && removed) {
segmentStats.eviction();
onRemoval.accept(future);
}
}
private static class SegmentStats {
private final LongAdder hits = new LongAdder();
private final LongAdder misses = new LongAdder();
@ -314,7 +348,7 @@ public class Cache<K, V> {
Entry<K, V> tail;
// lock protecting mutations to the LRU list
private ReleasableLock lruLock = new ReleasableLock(new ReentrantLock());
private final ReleasableLock lruLock = new ReleasableLock(new ReentrantLock());
/**
* Returns the value to which the specified key is mapped, or null if this map contains no mapping for the key.
@ -455,6 +489,19 @@ public class Cache<K, V> {
}
}
private final Consumer<CompletableFuture<Entry<K, V>>> invalidationConsumer = f -> {
try {
Entry<K, V> entry = f.get();
try (ReleasableLock ignored = lruLock.acquire()) {
delete(entry, RemovalNotification.RemovalReason.INVALIDATED);
}
} catch (ExecutionException e) {
// ok
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
};
/**
* Invalidate the association for the specified key. A removal notification will be issued for invalidated
* entries with {@link org.elasticsearch.common.cache.RemovalNotification.RemovalReason} INVALIDATED.
@ -463,18 +510,20 @@ public class Cache<K, V> {
*/
public void invalidate(K key) {
CacheSegment<K, V> segment = getCacheSegment(key);
segment.remove(key, f -> {
try {
Entry<K, V> entry = f.get();
try (ReleasableLock ignored = lruLock.acquire()) {
delete(entry, RemovalNotification.RemovalReason.INVALIDATED);
}
} catch (ExecutionException e) {
// ok
} catch (InterruptedException e) {
throw new IllegalStateException(e);
}
});
segment.remove(key, invalidationConsumer);
}
/**
* Invalidate the entry for the specified key and value. If the value provided is not equal to the value in
* the cache, no removal will occur. A removal notification will be issued for invalidated
* entries with {@link org.elasticsearch.common.cache.RemovalNotification.RemovalReason} INVALIDATED.
*
* @param key the key whose mapping is to be invalidated from the cache
* @param value the expected value that should be associated with the key
*/
public void invalidate(K key, V value) {
CacheSegment<K, V> segment = getCacheSegment(key);
segment.remove(key, value, invalidationConsumer);
}
/**
@ -625,7 +674,7 @@ public class Cache<K, V> {
Entry<K, V> entry = current;
if (entry != null) {
CacheSegment<K, V> segment = getCacheSegment(entry.key);
segment.remove(entry.key, f -> {});
segment.remove(entry.key, entry.value, f -> {});
try (ReleasableLock ignored = lruLock.acquire()) {
current = null;
delete(entry, RemovalNotification.RemovalReason.INVALIDATED);
@ -710,7 +759,7 @@ public class Cache<K, V> {
CacheSegment<K, V> segment = getCacheSegment(entry.key);
if (segment != null) {
segment.remove(entry.key, f -> {});
segment.remove(entry.key, entry.value, f -> {});
}
delete(entry, RemovalNotification.RemovalReason.EVICTED);
}
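A hedged usage sketch of the new conditional invalidation, assuming a cache
built with the existing `CacheBuilder` helper:
```java
Cache<String, String> cache = CacheBuilder.<String, String>builder().build();
cache.put("key", "v1");
cache.invalidate("key", "v2"); // value does not match, the entry survives
cache.invalidate("key", "v1"); // value matches, the entry is removed and
                               // an INVALIDATED removal notification is issued
```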


@ -22,6 +22,7 @@ package org.elasticsearch.common.geo;
import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;
@ -85,21 +86,27 @@ public final class GeoPoint implements ToXContentFragment {
public GeoPoint resetFromString(String value, final boolean ignoreZValue) {
if (value.contains(",")) {
String[] vals = value.split(",");
if (vals.length > 3) {
throw new ElasticsearchParseException("failed to parse [{}], expected 2 or 3 coordinates "
+ "but found: [{}]", vals.length);
}
double lat = Double.parseDouble(vals[0].trim());
double lon = Double.parseDouble(vals[1].trim());
if (vals.length > 2) {
GeoPoint.assertZValue(ignoreZValue, Double.parseDouble(vals[2].trim()));
}
return reset(lat, lon);
return resetFromCoordinates(value, ignoreZValue);
}
return resetFromGeoHash(value);
}
public GeoPoint resetFromCoordinates(String value, final boolean ignoreZValue) {
String[] vals = value.split(",");
if (vals.length > 3) {
throw new ElasticsearchParseException("failed to parse [{}], expected 2 or 3 coordinates "
+ "but found: [{}]", value, vals.length);
}
double lat = Double.parseDouble(vals[0].trim());
double lon = Double.parseDouble(vals[1].trim());
if (vals.length > 2) {
GeoPoint.assertZValue(ignoreZValue, Double.parseDouble(vals[2].trim()));
}
return reset(lat, lon);
}
public GeoPoint resetFromIndexHash(long hash) {
lon = GeoHashUtils.decodeLongitude(hash);
lat = GeoHashUtils.decodeLatitude(hash);


@ -387,6 +387,25 @@ public class GeoUtils {
}
}
/**
* Represents the point of the geohash cell that should be used as the value of geohash
*/
public enum EffectivePoint {
TOP_LEFT,
TOP_RIGHT,
BOTTOM_LEFT,
BOTTOM_RIGHT
}
/**
* Parse a geopoint represented as an object, string or an array. If the geopoint is represented as a geohash,
* the bottom left corner of the geohash cell is used as the geopoint coordinates.
*/
public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, final boolean ignoreZValue)
throws IOException, ElasticsearchParseException {
return parseGeoPoint(parser, point, ignoreZValue, EffectivePoint.BOTTOM_LEFT);
}
/**
* Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms:
*
@ -401,7 +420,7 @@ public class GeoUtils {
* @param point A {@link GeoPoint} that will be reset by the values parsed
* @return new {@link GeoPoint} parsed from the parse
*/
public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, final boolean ignoreZValue)
public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, final boolean ignoreZValue, EffectivePoint effectivePoint)
throws IOException, ElasticsearchParseException {
double lat = Double.NaN;
double lon = Double.NaN;
@ -458,7 +477,7 @@ public class GeoUtils {
if(!Double.isNaN(lat) || !Double.isNaN(lon)) {
throw new ElasticsearchParseException("field must be either lat/lon or geohash");
} else {
return point.resetFromGeoHash(geohash);
return parseGeoHash(point, geohash, effectivePoint);
}
} else if (numberFormatException != null) {
throw new ElasticsearchParseException("[{}] and [{}] must be valid double values", numberFormatException, LATITUDE,
@ -489,12 +508,36 @@ public class GeoUtils {
}
return point.reset(lat, lon);
} else if(parser.currentToken() == Token.VALUE_STRING) {
return point.resetFromString(parser.text(), ignoreZValue);
String val = parser.text();
if (val.contains(",")) {
return point.resetFromString(val, ignoreZValue);
} else {
return parseGeoHash(point, val, effectivePoint);
}
} else {
throw new ElasticsearchParseException("geo_point expected");
}
}
private static GeoPoint parseGeoHash(GeoPoint point, String geohash, EffectivePoint effectivePoint) {
if (effectivePoint == EffectivePoint.BOTTOM_LEFT) {
return point.resetFromGeoHash(geohash);
} else {
Rectangle rectangle = GeoHashUtils.bbox(geohash);
switch (effectivePoint) {
case TOP_LEFT:
return point.reset(rectangle.maxLat, rectangle.minLon);
case TOP_RIGHT:
return point.reset(rectangle.maxLat, rectangle.maxLon);
case BOTTOM_RIGHT:
return point.reset(rectangle.minLat, rectangle.maxLon);
default:
throw new IllegalArgumentException("Unsupported effective point " + effectivePoint);
}
}
}
/**
* Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m".
*


@ -24,8 +24,8 @@ import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.script.ExplainableSearchScript;
import org.elasticsearch.script.ScoreScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.SearchScript;
import java.io.IOException;
import java.util.Objects;
@ -58,10 +58,10 @@ public class ScriptScoreFunction extends ScoreFunction {
private final Script sScript;
private final SearchScript.LeafFactory script;
private final ScoreScript.LeafFactory script;
public ScriptScoreFunction(Script sScript, SearchScript.LeafFactory script) {
public ScriptScoreFunction(Script sScript, ScoreScript.LeafFactory script) {
super(CombineFunction.REPLACE);
this.sScript = sScript;
this.script = script;
@ -69,7 +69,7 @@ public class ScriptScoreFunction extends ScoreFunction {
@Override
public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException {
final SearchScript leafScript = script.newInstance(ctx);
final ScoreScript leafScript = script.newInstance(ctx);
final CannedScorer scorer = new CannedScorer();
leafScript.setScorer(scorer);
return new LeafScoreFunction() {
@ -78,7 +78,7 @@ public class ScriptScoreFunction extends ScoreFunction {
leafScript.setDocument(docId);
scorer.docid = docId;
scorer.score = subQueryScore;
double result = leafScript.runAsDouble();
double result = leafScript.execute();
return result;
}


@ -0,0 +1,115 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.collect.Tuple;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
/**
* A future implementation that allows for the result to be passed to listeners waiting for
* notification. This is useful for cases where a computation is requested many times
* concurrently, but really only needs to be performed a single time. Once the computation
* has been performed the registered listeners will be notified by submitting a runnable
* for execution in the provided {@link ExecutorService}. If the computation has already
* been performed, a request to add a listener will simply result in execution of the listener
* on the calling thread.
*/
public final class ListenableFuture<V> extends BaseFuture<V> implements ActionListener<V> {
private volatile boolean done = false;
private final List<Tuple<ActionListener<V>, ExecutorService>> listeners = new ArrayList<>();
/**
* Adds a listener to this future. If the future has not yet completed, the listener will be
* notified of a response or exception in a runnable submitted to the ExecutorService provided.
* If the future has completed, the listener will be notified immediately without forking to
* a different thread.
*/
public void addListener(ActionListener<V> listener, ExecutorService executor) {
if (done) {
// run the callback directly, we don't hold the lock and don't need to fork!
notifyListener(listener, EsExecutors.newDirectExecutorService());
} else {
final boolean run;
// check done under lock since it could have been modified and protect modifications
// to the list under lock
synchronized (this) {
if (done) {
run = true;
} else {
listeners.add(new Tuple<>(listener, executor));
run = false;
}
}
if (run) {
// run the callback directly, we don't hold the lock and don't need to fork!
notifyListener(listener, EsExecutors.newDirectExecutorService());
}
}
}
@Override
protected synchronized void done() {
done = true;
listeners.forEach(t -> notifyListener(t.v1(), t.v2()));
// release references to any listeners as we no longer need them and will live
// much longer than the listeners in most cases
listeners.clear();
}
private void notifyListener(ActionListener<V> listener, ExecutorService executorService) {
try {
executorService.submit(() -> {
try {
// call get in a non-blocking fashion as we could be on a network thread
// or another thread like the scheduler, which we should never block!
V value = FutureUtils.get(this, 0L, TimeUnit.NANOSECONDS);
listener.onResponse(value);
} catch (Exception e) {
listener.onFailure(e);
}
});
} catch (Exception e) {
listener.onFailure(e);
}
}
@Override
public void onResponse(V v) {
final boolean set = set(v);
if (set == false) {
throw new IllegalStateException("did not set value, value or exception already set?");
}
}
@Override
public void onFailure(Exception e) {
final boolean set = setException(e);
if (set == false) {
throw new IllegalStateException("did not set exception, value already set or exception already set?");
}
}
}
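A hedged usage sketch of `ListenableFuture` (the value and executor choice are
illustrative):
```java
ListenableFuture<String> future = new ListenableFuture<>();
// Register interest before the computation completes; the listener will be
// notified on the given executor once a result or failure is set.
future.addListener(ActionListener.wrap(
        value -> System.out.println("computed: " + value),
        e -> e.printStackTrace()),
    EsExecutors.newDirectExecutorService());
future.onResponse("done"); // completes the future and notifies the listener
```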


@ -22,18 +22,16 @@ package org.elasticsearch.discovery.single;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterApplier;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.zen.PendingClusterStateStats;
import org.elasticsearch.discovery.zen.PublishClusterStateStats;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -65,9 +63,9 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
clusterState = event.state();
CountDownLatch latch = new CountDownLatch(1);
ClusterStateTaskListener listener = new ClusterStateTaskListener() {
ClusterApplyListener listener = new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
ackListener.onNodeAck(transportService.getLocalNode(), null);
}


@ -21,7 +21,6 @@ package org.elasticsearch.discovery.zen;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@ -34,12 +33,11 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterApplier;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
@ -789,9 +787,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
clusterApplier.onNewClusterState("apply cluster state (from master [" + reason + "])",
this::clusterState,
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
try {
pendingStatesQueue.markAsProcessed(newClusterState);
} catch (Exception e) {


@ -491,19 +491,19 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
right = parser.doubleValue();
} else {
if (TOP_LEFT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
GeoUtils.parseGeoPoint(parser, sparse);
GeoUtils.parseGeoPoint(parser, sparse, false, GeoUtils.EffectivePoint.TOP_LEFT);
top = sparse.getLat();
left = sparse.getLon();
} else if (BOTTOM_RIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
GeoUtils.parseGeoPoint(parser, sparse);
GeoUtils.parseGeoPoint(parser, sparse, false, GeoUtils.EffectivePoint.BOTTOM_RIGHT);
bottom = sparse.getLat();
right = sparse.getLon();
} else if (TOP_RIGHT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
GeoUtils.parseGeoPoint(parser, sparse);
GeoUtils.parseGeoPoint(parser, sparse, false, GeoUtils.EffectivePoint.TOP_RIGHT);
top = sparse.getLat();
right = sparse.getLon();
} else if (BOTTOM_LEFT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
GeoUtils.parseGeoPoint(parser, sparse);
GeoUtils.parseGeoPoint(parser, sparse, false, GeoUtils.EffectivePoint.BOTTOM_LEFT);
bottom = sparse.getLat();
left = sparse.getLon();
} else {
@ -515,7 +515,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
}
}
if (envelope != null) {
if ((Double.isNaN(top) || Double.isNaN(bottom) || Double.isNaN(left) || Double.isNaN(right)) == false) {
if (Double.isNaN(top) == false || Double.isNaN(bottom) == false || Double.isNaN(left) == false ||
Double.isNaN(right) == false) {
throw new ElasticsearchParseException("failed to parse bounding box. Conflicting definition found "
+ "using well-known text and explicit corners.");
}
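To illustrate the corner resolution these changes rely on, a hedged sketch (the
geohash value is arbitrary) mirroring what `parseGeoHash` does for each
`EffectivePoint`:
```java
Rectangle cell = GeoHashUtils.bbox("u33d"); // bounding box of the geohash cell
GeoPoint topLeft = new GeoPoint(cell.maxLat, cell.minLon);
GeoPoint bottomRight = new GeoPoint(cell.minLat, cell.maxLon);
```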


@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.script.ScoreScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.SearchScript;
@ -92,8 +93,8 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder<ScriptScore
@Override
protected ScoreFunction doToFunction(QueryShardContext context) {
try {
SearchScript.Factory factory = context.getScriptService().compile(script, SearchScript.SCRIPT_SCORE_CONTEXT);
SearchScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup());
ScoreScript.Factory factory = context.getScriptService().compile(script, ScoreScript.CONTEXT);
ScoreScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup());
return new ScriptScoreFunction(script, searchScript);
} catch (Exception e) {
throw new QueryShardException(context, "script_score: the script could not be loaded", e);


@ -32,10 +32,10 @@ import java.util.Map;
*/
public final class Pipeline {
static final String DESCRIPTION_KEY = "description";
static final String PROCESSORS_KEY = "processors";
static final String VERSION_KEY = "version";
static final String ON_FAILURE_KEY = "on_failure";
public static final String DESCRIPTION_KEY = "description";
public static final String PROCESSORS_KEY = "processors";
public static final String VERSION_KEY = "version";
public static final String ON_FAILURE_KEY = "on_failure";
private final String id;
@Nullable


@ -25,7 +25,6 @@ import org.elasticsearch.cluster.AbstractNamedDiffable;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;


@ -0,0 +1,102 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.script;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Scorer;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.search.lookup.LeafSearchLookup;
import org.elasticsearch.search.lookup.SearchLookup;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Map;
import java.util.function.DoubleSupplier;
/**
* A script used for adjusting the score on a per document basis.
*/
public abstract class ScoreScript {
public static final String[] PARAMETERS = new String[]{};
/** The generic runtime parameters for the script. */
private final Map<String, Object> params;
/** A leaf lookup for the bound segment this script will operate on. */
private final LeafSearchLookup leafLookup;
private DoubleSupplier scoreSupplier = () -> 0.0;
public ScoreScript(Map<String, Object> params, SearchLookup lookup, LeafReaderContext leafContext) {
this.params = params;
this.leafLookup = lookup.getLeafSearchLookup(leafContext);
}
public abstract double execute();
/** Return the parameters for this script. */
public Map<String, Object> getParams() {
return params;
}
/** The doc lookup for the Lucene segment this script was created for. */
public final Map<String, ScriptDocValues<?>> getDoc() {
return leafLookup.doc();
}
/** Set the current document to run the script on next. */
public void setDocument(int docid) {
leafLookup.setDocument(docid);
}
public void setScorer(Scorer scorer) {
this.scoreSupplier = () -> {
try {
return scorer.score();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
};
}
public double get_score() {
return scoreSupplier.getAsDouble();
}
/** A factory to construct {@link ScoreScript} instances. */
public interface LeafFactory {
/**
* Return {@code true} if the script needs {@code _score} calculated, or {@code false} otherwise.
*/
boolean needs_score();
ScoreScript newInstance(LeafReaderContext ctx) throws IOException;
}
/** A factory to construct stateful {@link ScoreScript} factories for a specific index. */
public interface Factory {
ScoreScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup);
}
public static final ScriptContext<ScoreScript.Factory> CONTEXT = new ScriptContext<>("score", ScoreScript.Factory.class);
}
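A hedged sketch of a concrete `ScoreScript`; the doubling logic is invented for
illustration:
```java
import java.util.Map;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.script.ScoreScript;
import org.elasticsearch.search.lookup.SearchLookup;

public class DoublingScoreScript extends ScoreScript {

    public DoublingScoreScript(Map<String, Object> params, SearchLookup lookup, LeafReaderContext ctx) {
        super(params, lookup, ctx);
    }

    @Override
    public double execute() {
        // Double the score produced by the wrapped query.
        return 2.0 * get_score();
    }
}
```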


@ -42,7 +42,7 @@ public class ScriptModule {
CORE_CONTEXTS = Stream.of(
SearchScript.CONTEXT,
SearchScript.AGGS_CONTEXT,
SearchScript.SCRIPT_SCORE_CONTEXT,
ScoreScript.CONTEXT,
SearchScript.SCRIPT_SORT_CONTEXT,
SearchScript.TERMS_SET_QUERY_CONTEXT,
ExecutableScript.CONTEXT,


@ -162,8 +162,6 @@ public abstract class SearchScript implements ScorerAware, ExecutableScript {
public static final ScriptContext<Factory> AGGS_CONTEXT = new ScriptContext<>("aggs", Factory.class);
// Can return a double. (For ScriptSortType#NUMBER only, for ScriptSortType#STRING normal CONTEXT should be used)
public static final ScriptContext<Factory> SCRIPT_SORT_CONTEXT = new ScriptContext<>("sort", Factory.class);
// Can return a float
public static final ScriptContext<Factory> SCRIPT_SCORE_CONTEXT = new ScriptContext<>("score", Factory.class);
// Can return a long
public static final ScriptContext<Factory> TERMS_SET_QUERY_CONTEXT = new ScriptContext<>("terms_set", Factory.class);
}


@ -20,6 +20,7 @@
package org.elasticsearch.search.aggregations.bucket.range;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -57,35 +58,41 @@ public final class InternalBinaryRange
long docCount, InternalAggregations aggregations) {
this.format = format;
this.keyed = keyed;
this.key = key;
this.key = key != null ? key : generateKey(from, to, format);
this.from = from;
this.to = to;
this.docCount = docCount;
this.aggregations = aggregations;
}
// for serialization
private Bucket(StreamInput in, DocValueFormat format, boolean keyed) throws IOException {
this.format = format;
this.keyed = keyed;
key = in.readOptionalString();
if (in.readBoolean()) {
from = in.readBytesRef();
} else {
from = null;
}
if (in.readBoolean()) {
to = in.readBytesRef();
} else {
to = null;
}
docCount = in.readLong();
aggregations = InternalAggregations.readAggregations(in);
private static String generateKey(BytesRef from, BytesRef to, DocValueFormat format) {
StringBuilder builder = new StringBuilder()
.append(from == null ? "*" : format.format(from))
.append("-")
.append(to == null ? "*" : format.format(to));
return builder.toString();
}
private static Bucket createFromStream(StreamInput in, DocValueFormat format, boolean keyed) throws IOException {
String key = in.getVersion().onOrAfter(Version.V_6_4_0)
? in.readString()
: in.readOptionalString();
BytesRef from = in.readBoolean() ? in.readBytesRef() : null;
BytesRef to = in.readBoolean() ? in.readBytesRef() : null;
long docCount = in.readLong();
InternalAggregations aggregations = InternalAggregations.readAggregations(in);
return new Bucket(format, keyed, key, from, to, docCount, aggregations);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(key);
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
out.writeString(key);
} else {
out.writeOptionalString(key);
}
out.writeBoolean(from != null);
if (from != null) {
out.writeBytesRef(from);
@ -122,19 +129,10 @@ public final class InternalBinaryRange
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
String key = this.key;
if (keyed) {
if (key == null) {
StringBuilder keyBuilder = new StringBuilder();
keyBuilder.append(from == null ? "*" : format.format(from));
keyBuilder.append("-");
keyBuilder.append(to == null ? "*" : format.format(to));
key = keyBuilder.toString();
}
builder.startObject(key);
} else {
builder.startObject();
if (key != null) {
builder.field(CommonFields.KEY.getPreferredName(), key);
}
builder.field(CommonFields.KEY.getPreferredName(), key);
}
if (from != null) {
builder.field(CommonFields.FROM.getPreferredName(), getFrom());
@ -208,10 +206,9 @@ public final class InternalBinaryRange
super(in);
format = in.readNamedWriteable(DocValueFormat.class);
keyed = in.readBoolean();
buckets = in.readList(stream -> new Bucket(stream, format, keyed));
buckets = in.readList(stream -> Bucket.createFromStream(stream, format, keyed));
}
@Override
protected void doWriteTo(StreamOutput out) throws IOException {
out.writeNamedWriteable(format);


@ -98,18 +98,16 @@ public class ParsedBinaryRange extends ParsedMultiBucketAggregation<ParsedBinary
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (isKeyed()) {
builder.startObject(key != null ? key : rangeKey(from, to));
builder.startObject(key);
} else {
builder.startObject();
if (key != null) {
builder.field(CommonFields.KEY.getPreferredName(), key);
}
builder.field(CommonFields.KEY.getPreferredName(), key);
}
if (from != null) {
builder.field(CommonFields.FROM.getPreferredName(), getFrom());
builder.field(CommonFields.FROM.getPreferredName(), from);
}
if (to != null) {
builder.field(CommonFields.TO.getPreferredName(), getTo());
builder.field(CommonFields.TO.getPreferredName(), to);
}
builder.field(CommonFields.DOC_COUNT.getPreferredName(), getDocCount());
getAggregations().toXContentInternal(builder, params);
@ -123,10 +121,9 @@ public class ParsedBinaryRange extends ParsedMultiBucketAggregation<ParsedBinary
XContentParser.Token token = parser.currentToken();
String currentFieldName = parser.currentName();
String rangeKey = null;
if (keyed) {
ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation);
rangeKey = currentFieldName;
bucket.key = currentFieldName;
ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
}
@ -150,19 +147,7 @@ public class ParsedBinaryRange extends ParsedMultiBucketAggregation<ParsedBinary
}
}
bucket.setAggregations(new Aggregations(aggregations));
if (keyed) {
if (rangeKey(bucket.from, bucket.to).equals(rangeKey)) {
bucket.key = null;
} else {
bucket.key = rangeKey;
}
}
return bucket;
}
private static String rangeKey(String from, String to) {
return (from == null ? "*" : from) + '-' + (to == null ? "*" : to);
}
}
}


@ -1438,6 +1438,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry);
streamIn.setVersion(version);
threadPool.getThreadContext().readHeaders(streamIn);
threadPool.getThreadContext().putTransient("_remote_address", remoteAddress);
if (TransportStatus.isRequest(status)) {
handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, status);
} else {
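The single added line above makes the peer's address available to downstream code via a thread-context transient before the request or response handler runs. A minimal sketch of reading it back on the same thread (assumption: the stored value is the InetSocketAddress passed into handleRequest):

import java.net.InetSocketAddress;
import org.elasticsearch.common.util.concurrent.ThreadContext;

// Recovering the peer address inside a handler; the key must match the
// "_remote_address" string used in TcpTransport above.
static InetSocketAddress remoteAddressOf(ThreadContext ctx) {
    return ctx.getTransient("_remote_address");
}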

View File

@ -20,9 +20,13 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.ingest.Pipeline;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
@ -43,4 +47,25 @@ public class PutPipelineRequestTests extends ESTestCase {
assertEquals(XContentType.JSON, serialized.getXContentType());
assertEquals("{}", serialized.getSource().utf8ToString());
}
public void testToXContent() throws IOException {
XContentType xContentType = randomFrom(XContentType.values());
XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent());
pipelineBuilder.startObject().field(Pipeline.DESCRIPTION_KEY, "some random set of processors");
pipelineBuilder.startArray(Pipeline.PROCESSORS_KEY);
// Start first processor
pipelineBuilder.startObject();
pipelineBuilder.startObject("set");
pipelineBuilder.field("field", "foo");
pipelineBuilder.field("value", "bar");
pipelineBuilder.endObject();
pipelineBuilder.endObject();
// End first processor
pipelineBuilder.endArray();
pipelineBuilder.endObject();
PutPipelineRequest request = new PutPipelineRequest("1", BytesReference.bytes(pipelineBuilder), xContentType);
XContentBuilder requestBuilder = XContentBuilder.builder(xContentType.xContent());
BytesReference actualRequestBody = BytesReference.bytes(request.toXContent(requestBuilder, ToXContent.EMPTY_PARAMS));
assertEquals(BytesReference.bytes(pipelineBuilder), actualRequestBody);
}
}
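The test above doubles as a usage recipe: a PutPipelineRequest is built from an id, the pipeline definition as bytes, and its content type, and the new toXContent reproduces exactly the body that was passed in. A condensed sketch with the JSON inlined as a plain string (the pipeline id and contents are illustrative):

import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.xcontent.XContentType;

PutPipelineRequest request = new PutPipelineRequest(
        "my-pipeline",
        new BytesArray("{\"description\":\"sets foo\","
                + "\"processors\":[{\"set\":{\"field\":\"foo\",\"value\":\"bar\"}}]}"),
        XContentType.JSON);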

View File

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractStreamableXContentTestCase;
public class PutPipelineResponseTests extends AbstractStreamableXContentTestCase<PutPipelineResponse> {
public void testToXContent() {
PutPipelineResponse response = new PutPipelineResponse(true);
String output = Strings.toString(response);
assertEquals("{\"acknowledged\":true}", output);
}
@Override
protected PutPipelineResponse doParseInstance(XContentParser parser) {
return PutPipelineResponse.fromXContent(parser);
}
@Override
protected PutPipelineResponse createTestInstance() {
return new PutPipelineResponse(randomBoolean());
}
@Override
protected PutPipelineResponse createBlankInstance() {
return new PutPipelineResponse();
}
@Override
protected PutPipelineResponse mutateInstance(PutPipelineResponse response) {
return new PutPipelineResponse(response.isAcknowledged() == false);
}
}

View File

@ -66,6 +66,7 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase {
* This retry logic is implemented in TransportMasterNodeAction and tested by the following master failover scenario.
*/
@TestLogging("_root:DEBUG")
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30844")
public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable {
logger.info("--> start 4 nodes, 3 master, 1 data");

View File

@ -139,6 +139,8 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
protected static void assertHeaders(Map<String, String> headers, Map<String, String> expected) {
assertNotNull(headers);
headers = new HashMap<>(headers);
headers.remove("transport_client"); // default header on TPC
assertEquals(expected.size(), headers.size());
for (Map.Entry<String, String> expectedEntry : expected.entrySet()) {
assertEquals(headers.get(expectedEntry.getKey()), expectedEntry.getValue());
@ -146,7 +148,6 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
}
protected static void assertHeaders(ThreadPool pool) {
Map<String, String> headers = new HashMap<>();
Settings asSettings = HEADER_SETTINGS.getAsSettings(ThreadContext.PREFIX);
assertHeaders(pool.getThreadContext().getHeaders(),
asSettings.keySet().stream().collect(Collectors.toMap(Function.identity(), k -> asSettings.get(k))));

View File

@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESTestCase;
@ -63,6 +64,17 @@ public class TransportClientTests extends ESTestCase {
}
}
public void testDefaultHeaderContainsPlugins() {
Settings baseSettings = Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
try (TransportClient client = new MockTransportClient(baseSettings, Arrays.asList(MockPlugin.class))) {
ThreadContext threadContext = client.threadPool().getThreadContext();
assertEquals("true", threadContext.getHeader("transport_client"));
assertEquals("true", threadContext.getHeader("test"));
}
}
public static class MockPlugin extends Plugin {
@Override
@ -70,6 +82,11 @@ public class TransportClientTests extends ESTestCase {
return Arrays.asList(new Entry[]{ new Entry(MockNamedWriteable.class, MockNamedWriteable.NAME, MockNamedWriteable::new)});
}
@Override
public Settings additionalSettings() {
return Settings.builder().put(ThreadContext.PREFIX + "." + "test", true).build();
}
public class MockNamedWriteable implements NamedWriteable {
static final String NAME = "mockNamedWritable";
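MockPlugin above demonstrates the mechanism behind the new default headers: anything a plugin contributes under the thread-context settings prefix becomes a header on the transport client's thread context (here "test", asserted alongside the built-in "transport_client" header). A minimal standalone plugin sketch (HeaderPlugin is a hypothetical name):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.plugins.Plugin;

public class HeaderPlugin extends Plugin {
    @Override
    public Settings additionalSettings() {
        // "request.headers.test: true" surfaces as header "test: true" on the client
        return Settings.builder().put(ThreadContext.PREFIX + ".test", true).build();
    }
}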

View File

@ -30,6 +30,7 @@ import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
@ -135,9 +136,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
clusterApplierService.currentTimeOverride = System.nanoTime();
clusterApplierService.runOnApplierThread("test1",
currentState -> clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(),
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}
@ -151,9 +152,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(2).nanos();
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
},
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
fail();
}
@ -166,9 +167,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterApplierService.runOnApplierThread("test3",
currentState -> {},
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}
@ -216,9 +217,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
clusterApplierService.currentTimeOverride = System.nanoTime();
clusterApplierService.runOnApplierThread("test1",
currentState -> clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(1).nanos(),
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
processedFirstTask.countDown();
}
@ -234,9 +235,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(32).nanos();
throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task");
},
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
fail();
}
@ -247,9 +248,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
});
clusterApplierService.runOnApplierThread("test3",
currentState -> clusterApplierService.currentTimeOverride += TimeValue.timeValueSeconds(34).nanos(),
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}
@ -262,9 +263,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
// We don't check logging for this one since there is no guarantee that it will occur before our check
clusterApplierService.runOnApplierThread("test4",
currentState -> {},
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}
@ -340,10 +341,10 @@ public class ClusterApplierServiceTests extends ESTestCase {
CountDownLatch latch = new CountDownLatch(1);
clusterApplierService.onNewClusterState("test", () -> ClusterState.builder(clusterApplierService.state()).build(),
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}
@ -390,9 +391,9 @@ public class ClusterApplierServiceTests extends ESTestCase {
CountDownLatch latch = new CountDownLatch(1);
clusterApplierService.onNewClusterState("test", () -> ClusterState.builder(clusterApplierService.state()).build(),
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}
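The long run of mechanical substitutions above is the visible cost of decoupling ClusterStateTaskListener from ClusterApplier: appliers now take the narrower ClusterApplyListener, whose success callback carries only the source string. A sketch of the new shape (the onFailure signature is an assumption based on the overrides elided from the hunks above):

import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;

ClusterApplyListener listener = new ClusterApplyListener() {
    @Override
    public void onSuccess(String source) {
        // the new state was applied
    }

    @Override
    public void onFailure(String source, Exception e) {
        // application failed; no old/new ClusterState is passed anymore
    }
};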

View File

@ -457,6 +457,62 @@ public class CacheTests extends ESTestCase {
assertEquals(notifications, invalidated);
}
// randomly invalidate some cached entries, then check that a lookup for each of those and only those keys is null
public void testInvalidateWithValue() {
Cache<Integer, String> cache = CacheBuilder.<Integer, String>builder().build();
for (int i = 0; i < numberOfEntries; i++) {
cache.put(i, Integer.toString(i));
}
Set<Integer> keys = new HashSet<>();
for (Integer key : cache.keys()) {
if (rarely()) {
if (randomBoolean()) {
cache.invalidate(key, key.toString());
keys.add(key);
} else {
// invalidate with incorrect value
cache.invalidate(key, Integer.toString(key * randomIntBetween(2, 10)));
}
}
}
for (int i = 0; i < numberOfEntries; i++) {
if (keys.contains(i)) {
assertNull(cache.get(i));
} else {
assertNotNull(cache.get(i));
}
}
}
// randomly invalidate some cached entries, then check that we receive invalidate notifications for those and only
// those entries
public void testNotificationOnInvalidateWithValue() {
Set<Integer> notifications = new HashSet<>();
Cache<Integer, String> cache =
CacheBuilder.<Integer, String>builder()
.removalListener(notification -> {
assertEquals(RemovalNotification.RemovalReason.INVALIDATED, notification.getRemovalReason());
notifications.add(notification.getKey());
})
.build();
for (int i = 0; i < numberOfEntries; i++) {
cache.put(i, Integer.toString(i));
}
Set<Integer> invalidated = new HashSet<>();
for (int i = 0; i < numberOfEntries; i++) {
if (rarely()) {
if (randomBoolean()) {
cache.invalidate(i, Integer.toString(i));
invalidated.add(i);
} else {
// invalidate with incorrect value
cache.invalidate(i, Integer.toString(i * randomIntBetween(2, 10)));
}
}
}
assertEquals(notifications, invalidated);
}
// invalidate all cached entries, then check that the cache is empty
public void testInvalidateAll() {
Cache<Integer, String> cache = CacheBuilder.<Integer, String>builder().build();
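The two new tests above pin down conditional invalidation: Cache.invalidate(key, value) removes an entry only when the supplied value matches the cached one, and the removal listener fires with reason INVALIDATED only in that case. A short usage sketch:

import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;

Cache<String, String> cache = CacheBuilder.<String, String>builder().build();
cache.put("k", "v1");
cache.invalidate("k", "v2"); // no-op: the value does not match
cache.invalidate("k", "v1"); // removes the entry
assert cache.get("k") == null;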

View File

@ -0,0 +1,118 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.junit.After;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
public class ListenableFutureTests extends ESTestCase {
private ExecutorService executorService;
@After
public void stopExecutorService() throws InterruptedException {
if (executorService != null) {
terminate(executorService);
}
}
public void testListenableFutureNotifiesListeners() {
ListenableFuture<String> future = new ListenableFuture<>();
AtomicInteger notifications = new AtomicInteger(0);
final int numberOfListeners = scaledRandomIntBetween(1, 12);
for (int i = 0; i < numberOfListeners; i++) {
future.addListener(ActionListener.wrap(notifications::incrementAndGet), EsExecutors.newDirectExecutorService());
}
future.onResponse("");
assertEquals(numberOfListeners, notifications.get());
assertTrue(future.isDone());
}
public void testListenableFutureNotifiesListenersOnException() {
ListenableFuture<String> future = new ListenableFuture<>();
AtomicInteger notifications = new AtomicInteger(0);
final int numberOfListeners = scaledRandomIntBetween(1, 12);
final Exception exception = new RuntimeException();
for (int i = 0; i < numberOfListeners; i++) {
future.addListener(ActionListener.wrap(s -> fail("this should never be called"), e -> {
assertEquals(exception, e);
notifications.incrementAndGet();
}), EsExecutors.newDirectExecutorService());
}
future.onFailure(exception);
assertEquals(numberOfListeners, notifications.get());
assertTrue(future.isDone());
}
public void testConcurrentListenerRegistrationAndCompletion() throws BrokenBarrierException, InterruptedException {
final int numberOfThreads = scaledRandomIntBetween(2, 32);
final int completingThread = randomIntBetween(0, numberOfThreads - 1);
final ListenableFuture<String> future = new ListenableFuture<>();
executorService = EsExecutors.newFixed("testConcurrentListenerRegistrationAndCompletion", numberOfThreads, 1000,
EsExecutors.daemonThreadFactory("listener"), new ThreadContext(Settings.EMPTY));
final CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads);
final CountDownLatch listenersLatch = new CountDownLatch(numberOfThreads - 1);
final AtomicInteger numResponses = new AtomicInteger(0);
final AtomicInteger numExceptions = new AtomicInteger(0);
for (int i = 0; i < numberOfThreads; i++) {
final int threadNum = i;
Thread thread = new Thread(() -> {
try {
barrier.await();
if (threadNum == completingThread) {
future.onResponse("");
} else {
future.addListener(ActionListener.wrap(s -> {
assertEquals("", s);
numResponses.incrementAndGet();
listenersLatch.countDown();
}, e -> {
logger.error("caught unexpected exception", e);
numExceptions.incrementAndGet();
listenersLatch.countDown();
}), executorService);
}
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
throw new AssertionError(e);
}
});
thread.start();
}
barrier.await();
barrier.await();
listenersLatch.await();
assertEquals(numberOfThreads - 1, numResponses.get());
assertEquals(0, numExceptions.get());
}
}
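ListenableFuture is introduced by this commit; the tests establish that every listener is notified exactly once on the supplied executor, whether it was registered before or after completion, and that registration is safe under concurrency. A minimal usage sketch:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ListenableFuture;

ListenableFuture<String> future = new ListenableFuture<>();
future.addListener(
        ActionListener.wrap(
                value -> System.out.println("completed with " + value),
                e -> e.printStackTrace()),
        EsExecutors.newDirectExecutorService());
future.onResponse("done"); // notifies the listener; listeners added later fire immediately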

View File

@ -23,7 +23,6 @@ import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterApplier;
@ -72,9 +71,9 @@ public class SingleNodeDiscoveryTests extends ESTestCase {
@Override
public void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier,
ClusterStateTaskListener listener) {
ClusterApplyListener listener) {
clusterState.set(clusterStateSupplier.get());
listener.clusterStateProcessed(source, clusterState.get(), clusterState.get());
listener.onSuccess(source);
}
});
discovery.start();

View File

@ -26,7 +26,6 @@ import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@ -314,8 +313,8 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
}
@Override
public void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ClusterStateTaskListener listener) {
listener.clusterStateProcessed(source, clusterStateSupplier.get(), clusterStateSupplier.get());
public void onNewClusterState(String source, Supplier<ClusterState> clusterStateSupplier, ClusterApplyListener listener) {
listener.onSuccess(source);
}
};
ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, service, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()),

View File

@ -24,6 +24,7 @@ import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.index.mapper.MappedFieldType;
@ -450,6 +451,64 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase<GeoBo
assertEquals(expectedJson, GeoExecType.MEMORY, parsed.type());
}
public void testFromGeohash() throws IOException {
String json =
"{\n" +
" \"geo_bounding_box\" : {\n" +
" \"pin.location\" : {\n" +
" \"top_left\" : \"dr\",\n" +
" \"bottom_right\" : \"dq\"\n" +
" },\n" +
" \"validation_method\" : \"STRICT\",\n" +
" \"type\" : \"MEMORY\",\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
" }\n" +
"}";
String expectedJson =
"{\n" +
" \"geo_bounding_box\" : {\n" +
" \"pin.location\" : {\n" +
" \"top_left\" : [ -78.75, 45.0 ],\n" +
" \"bottom_right\" : [ -67.5, 33.75 ]\n" +
" },\n" +
" \"validation_method\" : \"STRICT\",\n" +
" \"type\" : \"MEMORY\",\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
" }\n" +
"}";
GeoBoundingBoxQueryBuilder parsed = (GeoBoundingBoxQueryBuilder) parseQuery(json);
checkGeneratedJson(expectedJson, parsed);
assertEquals(json, "pin.location", parsed.fieldName());
assertEquals(json, -78.75, parsed.topLeft().getLon(), 0.0001);
assertEquals(json, 45.0, parsed.topLeft().getLat(), 0.0001);
assertEquals(json, -67.5, parsed.bottomRight().getLon(), 0.0001);
assertEquals(json, 33.75, parsed.bottomRight().getLat(), 0.0001);
assertEquals(json, 1.0, parsed.boost(), 0.0001);
assertEquals(json, GeoExecType.MEMORY, parsed.type());
}
public void testMalformedGeohashes() {
String jsonGeohashAndWkt =
"{\n" +
" \"geo_bounding_box\" : {\n" +
" \"pin.location\" : {\n" +
" \"top_left\" : [ -78.75, 45.0 ],\n" +
" \"wkt\" : \"BBOX (-74.1, -71.12, 40.73, 40.01)\"\n" +
" },\n" +
" \"validation_method\" : \"STRICT\",\n" +
" \"type\" : \"MEMORY\",\n" +
" \"ignore_unmapped\" : false,\n" +
" \"boost\" : 1.0\n" +
" }\n" +
"}";
ElasticsearchParseException e1 = expectThrows(ElasticsearchParseException.class, () -> parseQuery(jsonGeohashAndWkt));
assertThat(e1.getMessage(), containsString("Conflicting definition found using well-known text and explicit corners."));
}
@Override
public void testMustRewrite() throws IOException {
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);

View File

@ -610,6 +610,20 @@ public class GeoUtilsTests extends ESTestCase {
}
}
public void testParseGeoPointGeohashPositions() throws IOException {
assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.TOP_LEFT), new GeoPoint(42.890625, -71.71875));
assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.TOP_RIGHT), new GeoPoint(42.890625, -71.3671875));
assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.71484375, -71.71875));
assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.BOTTOM_RIGHT), new GeoPoint(42.71484375, -71.3671875));
assertNormalizedPoint(parseGeohash("drtk", GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.890625, -71.3671875));
}
private GeoPoint parseGeohash(String geohash, GeoUtils.EffectivePoint effectivePoint) throws IOException {
XContentParser parser = createParser(jsonBuilder().startObject().field("geohash", geohash).endObject());
parser.nextToken();
return GeoUtils.parseGeoPoint(parser, new GeoPoint(), randomBoolean(), effectivePoint);
}
private static void assertNormalizedPoint(GeoPoint input, GeoPoint expected) {
GeoUtils.normalizePoint(input);
if (Double.isNaN(expected.lat())) {
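The positions asserted above encode the geohash-cell semantics behind the geo_bounding_box change: a geohash names a cell, and EffectivePoint selects which corner of that cell the parsed GeoPoint should represent, which is how the "dr"/"dq" corners in the geo_bounding_box test earlier in this diff expand to cover full cells. A small helper sketch mirroring the test (assumption: the parser is positioned on an object such as {"geohash": "drt5"}, as in parseGeohash above):

import java.io.IOException;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.xcontent.XContentParser;

// Resolve a geohash to a chosen cell corner instead of the default point.
// The boolean flag mirrors the test's randomBoolean() argument.
static GeoPoint cellCorner(XContentParser parser, GeoUtils.EffectivePoint point) throws IOException {
    return GeoUtils.parseGeoPoint(parser, new GeoPoint(), false, point);
}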

View File

@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.cluster.service.ClusterApplierService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
@ -446,9 +447,9 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder).build())
.build();
CountDownLatch latch = new CountDownLatch(1);
clusterApplierService.onNewClusterState("test", () -> newState, new ClusterStateTaskListener() {
clusterApplierService.onNewClusterState("test", () -> newState, new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}

View File

@ -33,9 +33,7 @@ import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
@ -100,12 +98,6 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
return Arrays.asList(
new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new),
new NamedWriteableRegistry.Entry(Task.Status.class,
PersistentTasksNodeService.Status.NAME, PersistentTasksNodeService.Status::new),
new NamedWriteableRegistry.Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE,
PersistentTasksCustomMetaData::new),
new NamedWriteableRegistry.Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE,
PersistentTasksCustomMetaData::readDiffFrom),
new NamedWriteableRegistry.Entry(Task.Status.class, TestPersistentTasksExecutor.NAME, Status::new)
);
}
@ -113,8 +105,6 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P
@Override
public List<NamedXContentRegistry.Entry> getNamedXContent() {
return Arrays.asList(
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE),
PersistentTasksCustomMetaData::fromXContent),
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(TestPersistentTasksExecutor.NAME),
TestParams::fromXContent),
new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(TestPersistentTasksExecutor.NAME), Status::fromXContent)

View File

@ -18,14 +18,8 @@
*/
package org.elasticsearch.search.aggregations.bucket;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.function.Function;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.MockScriptPlugin;
@ -35,6 +29,12 @@ import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.function.Function;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.containsString;
@ -91,16 +91,19 @@ public class IpRangeIT extends ESIntegTestCase {
Range.Bucket bucket1 = range.getBuckets().get(0);
assertNull(bucket1.getFrom());
assertEquals("192.168.1.0", bucket1.getTo());
assertEquals("*-192.168.1.0", bucket1.getKey());
assertEquals(0, bucket1.getDocCount());
Range.Bucket bucket2 = range.getBuckets().get(1);
assertEquals("192.168.1.0", bucket2.getFrom());
assertEquals("192.168.1.10", bucket2.getTo());
assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey());
assertEquals(1, bucket2.getDocCount());
Range.Bucket bucket3 = range.getBuckets().get(2);
assertEquals("192.168.1.10", bucket3.getFrom());
assertNull(bucket3.getTo());
assertEquals("192.168.1.10-*", bucket3.getKey());
assertEquals(2, bucket3.getDocCount());
}
@ -118,16 +121,19 @@ public class IpRangeIT extends ESIntegTestCase {
Range.Bucket bucket1 = range.getBuckets().get(0);
assertNull(bucket1.getFrom());
assertEquals("192.168.1.0", bucket1.getTo());
assertEquals("*-192.168.1.0", bucket1.getKey());
assertEquals(1, bucket1.getDocCount());
Range.Bucket bucket2 = range.getBuckets().get(1);
assertEquals("192.168.1.0", bucket2.getFrom());
assertEquals("192.168.1.10", bucket2.getTo());
assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey());
assertEquals(1, bucket2.getDocCount());
Range.Bucket bucket3 = range.getBuckets().get(2);
assertEquals("192.168.1.10", bucket3.getFrom());
assertNull(bucket3.getTo());
assertEquals("192.168.1.10-*", bucket3.getKey());
assertEquals(2, bucket3.getDocCount());
}
@ -169,16 +175,19 @@ public class IpRangeIT extends ESIntegTestCase {
Range.Bucket bucket1 = range.getBuckets().get(0);
assertNull(bucket1.getFrom());
assertEquals("192.168.1.0", bucket1.getTo());
assertEquals("*-192.168.1.0", bucket1.getKey());
assertEquals(0, bucket1.getDocCount());
Range.Bucket bucket2 = range.getBuckets().get(1);
assertEquals("192.168.1.0", bucket2.getFrom());
assertEquals("192.168.1.10", bucket2.getTo());
assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey());
assertEquals(1, bucket2.getDocCount());
Range.Bucket bucket3 = range.getBuckets().get(2);
assertEquals("192.168.1.10", bucket3.getFrom());
assertNull(bucket3.getTo());
assertEquals("192.168.1.10-*", bucket3.getKey());
assertEquals(2, bucket3.getDocCount());
}
@ -196,16 +205,19 @@ public class IpRangeIT extends ESIntegTestCase {
Range.Bucket bucket1 = range.getBuckets().get(0);
assertNull(bucket1.getFrom());
assertEquals("192.168.1.0", bucket1.getTo());
assertEquals("*-192.168.1.0", bucket1.getKey());
assertEquals(0, bucket1.getDocCount());
Range.Bucket bucket2 = range.getBuckets().get(1);
assertEquals("192.168.1.0", bucket2.getFrom());
assertEquals("192.168.1.10", bucket2.getTo());
assertEquals("192.168.1.0-192.168.1.10", bucket2.getKey());
assertEquals(0, bucket2.getDocCount());
Range.Bucket bucket3 = range.getBuckets().get(2);
assertEquals("192.168.1.10", bucket3.getFrom());
assertNull(bucket3.getTo());
assertEquals("192.168.1.10-*", bucket3.getKey());
assertEquals(0, bucket3.getDocCount());
}
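The new key assertions above fix the default bucket key format for ip_range: "from-to", with "*" standing in for an unbounded edge. A hypothetical helper capturing the rule (it mirrors the rangeKey method removed from ParsedBinaryRange earlier in this diff; the real key is generated inside InternalBinaryRange):

// Hypothetical illustration of the default key format, not an Elasticsearch API.
static String defaultRangeKey(String from, String to) {
    return (from == null ? "*" : from) + "-" + (to == null ? "*" : to);
}
// defaultRangeKey(null, "192.168.1.0")           -> "*-192.168.1.0"
// defaultRangeKey("192.168.1.0", "192.168.1.10") -> "192.168.1.0-192.168.1.10"
// defaultRangeKey("192.168.1.10", null)          -> "192.168.1.10-*"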

View File

@ -157,4 +157,15 @@ public class InternalBinaryRangeTests extends InternalRangeTestCase<InternalBina
}
return new InternalBinaryRange(name, format, keyed, buckets, pipelineAggregators, metaData);
}
/**
* Checks the invariant that bucket keys are always non-null, even if null keys
* were originally provided.
*/
public void testKeyGeneration() {
InternalBinaryRange range = createTestInstance();
for (InternalBinaryRange.Bucket bucket : range.getBuckets()) {
assertNotNull(bucket.getKey());
}
}
}

View File

@ -30,14 +30,14 @@ import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.script.ExplainableSearchScript;
import org.elasticsearch.script.ScoreScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.lookup.LeafDocLookup;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
@ -76,16 +76,17 @@ public class ExplainableScriptIT extends ESIntegTestCase {
@Override
public <T> T compile(String scriptName, String scriptSource, ScriptContext<T> context, Map<String, String> params) {
assert scriptSource.equals("explainable_script");
assert context == SearchScript.SCRIPT_SCORE_CONTEXT;
SearchScript.Factory factory = (p, lookup) -> new SearchScript.LeafFactory() {
@Override
public SearchScript newInstance(LeafReaderContext context) throws IOException {
return new MyScript(lookup.doc().getLeafDocLookup(context));
}
assert context == ScoreScript.CONTEXT;
ScoreScript.Factory factory = (params1, lookup) -> new ScoreScript.LeafFactory() {
@Override
public boolean needs_score() {
return false;
}
@Override
public ScoreScript newInstance(LeafReaderContext ctx) throws IOException {
return new MyScript(params1, lookup, ctx);
}
};
return context.factoryClazz.cast(factory);
}
@ -93,28 +94,21 @@ public class ExplainableScriptIT extends ESIntegTestCase {
}
}
static class MyScript extends SearchScript implements ExplainableSearchScript {
LeafDocLookup docLookup;
static class MyScript extends ScoreScript implements ExplainableSearchScript {
MyScript(LeafDocLookup docLookup) {
super(null, null, null);
this.docLookup = docLookup;
MyScript(Map<String, Object> params, SearchLookup lookup, LeafReaderContext leafContext) {
super(params, lookup, leafContext);
}
@Override
public void setDocument(int doc) {
docLookup.setDocument(doc);
}
@Override
public Explanation explain(Explanation subQueryScore) throws IOException {
Explanation scoreExp = Explanation.match(subQueryScore.getValue(), "_score: ", subQueryScore);
return Explanation.match((float) (runAsDouble()), "This script returned " + runAsDouble(), scoreExp);
return Explanation.match((float) (execute()), "This script returned " + execute(), scoreExp);
}
@Override
public double runAsDouble() {
return ((Number) ((ScriptDocValues) docLookup.get("number_field")).getValues().get(0)).doubleValue();
public double execute() {
return ((Number) ((ScriptDocValues) getDoc().get("number_field")).getValues().get(0)).doubleValue();
}
}
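The migration above is the template for the new score-script context: extend ScoreScript, accept (params, lookup, leafContext) in the constructor, and implement execute() using the inherited getDoc()/getParams() accessors instead of managing a LeafDocLookup by hand. A stripped-down sketch:

import java.util.Map;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.script.ScoreScript;
import org.elasticsearch.search.lookup.SearchLookup;

// Scores documents by the first value of a numeric doc-values field.
class FieldScoreScript extends ScoreScript {
    FieldScoreScript(Map<String, Object> params, SearchLookup lookup, LeafReaderContext ctx) {
        super(params, lookup, ctx);
    }

    @Override
    public double execute() {
        return ((Number) ((ScriptDocValues) getDoc().get("number_field")).getValues().get(0)).doubleValue();
    }
}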

View File

@ -25,7 +25,6 @@ import org.elasticsearch.index.similarity.ScriptedSimilarity.Doc;
import org.elasticsearch.index.similarity.ScriptedSimilarity.Field;
import org.elasticsearch.index.similarity.ScriptedSimilarity.Query;
import org.elasticsearch.index.similarity.ScriptedSimilarity.Term;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript;
import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions;
import org.elasticsearch.search.lookup.LeafSearchLookup;
@ -36,7 +35,6 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
import static java.util.Collections.emptyMap;
@ -114,6 +112,9 @@ public class MockScriptEngine implements ScriptEngine {
} else if (context.instanceClazz.equals(MovingFunctionScript.class)) {
MovingFunctionScript.Factory factory = mockCompiled::createMovingFunctionScript;
return context.factoryClazz.cast(factory);
} else if (context.instanceClazz.equals(ScoreScript.class)) {
ScoreScript.Factory factory = new MockScoreScript(script);
return context.factoryClazz.cast(factory);
}
throw new IllegalArgumentException("mock script engine does not know how to handle context [" + context.name + "]");
}
@ -342,5 +343,45 @@ public class MockScriptEngine implements ScriptEngine {
return MovingFunctions.unweightedAvg(values);
}
}
public class MockScoreScript implements ScoreScript.Factory {
private final Function<Map<String, Object>, Object> scripts;
MockScoreScript(Function<Map<String, Object>, Object> scripts) {
this.scripts = scripts;
}
@Override
public ScoreScript.LeafFactory newFactory(Map<String, Object> params, SearchLookup lookup) {
return new ScoreScript.LeafFactory() {
@Override
public boolean needs_score() {
return true;
}
@Override
public ScoreScript newInstance(LeafReaderContext ctx) throws IOException {
Scorer[] scorerHolder = new Scorer[1];
return new ScoreScript(params, lookup, ctx) {
@Override
public double execute() {
Map<String, Object> vars = new HashMap<>(getParams());
vars.put("doc", getDoc());
if (scorerHolder[0] != null) {
vars.put("_score", new ScoreAccessor(scorerHolder[0]));
}
return ((Number) scripts.apply(vars)).doubleValue();
}
@Override
public void setScorer(Scorer scorer) {
scorerHolder[0] = scorer;
}
};
}
};
}
}
}

View File

@ -24,13 +24,13 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterApplier;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.cluster.service.ClusterApplierService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.MasterService;
@ -72,9 +72,9 @@ public class ClusterServiceUtils {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Exception> exception = new AtomicReference<>();
executor.onNewClusterState("test setting state",
() -> ClusterState.builder(clusterState).version(clusterState.version() + 1).build(), new ClusterStateTaskListener() {
() -> ClusterState.builder(clusterState).version(clusterState.version() + 1).build(), new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}
@ -163,9 +163,9 @@ public class ClusterServiceUtils {
CountDownLatch latch = new CountDownLatch(1);
AtomicReference<Exception> ex = new AtomicReference<>();
clusterApplier.onNewClusterState("mock_publish_to_self[" + event.source() + "]", () -> event.state(),
new ClusterStateTaskListener() {
new ClusterApplyListener() {
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
public void onSuccess(String source) {
latch.countDown();
}

View File

@ -36,6 +36,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.mocksocket.MockServerSocket;
import org.elasticsearch.mocksocket.MockSocket;
import org.elasticsearch.common.concurrent.CompletableContext;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.BufferedInputStream;
@ -50,7 +51,6 @@ import java.net.SocketException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
@ -218,7 +218,7 @@ public class MockTcpTransport extends TcpTransport {
private final Socket activeChannel;
private final String profile;
private final CancellableThreads cancellableThreads = new CancellableThreads();
private final CompletableFuture<Void> closeFuture = new CompletableFuture<>();
private final CompletableContext<Void> closeFuture = new CompletableContext<>();
/**
* Constructs a new MockChannel instance intended for handling the actual incoming / outgoing traffic.
@ -364,7 +364,7 @@ public class MockTcpTransport extends TcpTransport {
@Override
public void addCloseListener(ActionListener<Void> listener) {
closeFuture.whenComplete(ActionListener.toBiConsumer(listener));
closeFuture.addListener(ActionListener.toBiConsumer(listener));
}
@Override
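CompletableContext replaces CompletableFuture for the channel close future; as the diff shows, consumers register a BiConsumer via addListener rather than whenComplete. A small sketch (the producer-side complete call is an assumption based on the class's role here):

import org.elasticsearch.common.concurrent.CompletableContext;

CompletableContext<Void> closeContext = new CompletableContext<>();
closeContext.addListener((ignored, e) -> {
    if (e != null) {
        // closed exceptionally
    } else {
        // closed normally
    }
});
closeContext.complete(null); // assumed producer-side counterpart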

View File

@ -16,6 +16,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.license.DeleteLicenseAction;
@ -90,13 +91,7 @@ import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction;
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
import org.elasticsearch.xpack.core.ml.job.config.JobTaskStatus;
import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage;
import org.elasticsearch.persistent.CompletionPersistentTaskAction;
import org.elasticsearch.persistent.PersistentTaskParams;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
import org.elasticsearch.persistent.PersistentTasksNodeService;
import org.elasticsearch.persistent.RemovePersistentTaskAction;
import org.elasticsearch.persistent.StartPersistentTaskAction;
import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction;
import org.elasticsearch.xpack.core.rollup.RollupFeatureSetUsage;
import org.elasticsearch.xpack.core.rollup.RollupField;
import org.elasticsearch.xpack.core.rollup.action.DeleteRollupJobAction;
@ -193,6 +188,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
final Settings.Builder builder = Settings.builder();
builder.put(SecuritySettings.addTransportSettings(settings));
builder.put(SecuritySettings.addUserSettings(settings));
builder.put(ThreadContext.PREFIX + "." + "has_xpack", true);
return builder.build();
} else {
return Settings.EMPTY;
@ -253,11 +249,6 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
GetCalendarEventsAction.INSTANCE,
PostCalendarEventsAction.INSTANCE,
PersistJobAction.INSTANCE,
// licensing
StartPersistentTaskAction.INSTANCE,
UpdatePersistentTaskStatusAction.INSTANCE,
RemovePersistentTaskAction.INSTANCE,
CompletionPersistentTaskAction.INSTANCE,
// security
ClearRealmCacheAction.INSTANCE,
ClearRolesCacheAction.INSTANCE,
@ -322,18 +313,12 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
// ML - Custom metadata
new NamedWriteableRegistry.Entry(MetaData.Custom.class, "ml", MlMetadata::new),
new NamedWriteableRegistry.Entry(NamedDiff.class, "ml", MlMetadata.MlMetadataDiff::new),
new NamedWriteableRegistry.Entry(MetaData.Custom.class, PersistentTasksCustomMetaData.TYPE,
PersistentTasksCustomMetaData::new),
new NamedWriteableRegistry.Entry(NamedDiff.class, PersistentTasksCustomMetaData.TYPE,
PersistentTasksCustomMetaData::readDiffFrom),
// ML - Persistent action requests
new NamedWriteableRegistry.Entry(PersistentTaskParams.class, StartDatafeedAction.TASK_NAME,
StartDatafeedAction.DatafeedParams::new),
new NamedWriteableRegistry.Entry(PersistentTaskParams.class, OpenJobAction.TASK_NAME,
OpenJobAction.JobParams::new),
// ML - Task statuses
new NamedWriteableRegistry.Entry(Task.Status.class, PersistentTasksNodeService.Status.NAME,
PersistentTasksNodeService.Status::new),
new NamedWriteableRegistry.Entry(Task.Status.class, JobTaskStatus.NAME, JobTaskStatus::new),
new NamedWriteableRegistry.Entry(Task.Status.class, DatafeedState.NAME, DatafeedState::fromStream),
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.MACHINE_LEARNING,
@ -368,8 +353,6 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
// ML - Custom metadata
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField("ml"),
parser -> MlMetadata.METADATA_PARSER.parse(parser, null).build()),
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(PersistentTasksCustomMetaData.TYPE),
PersistentTasksCustomMetaData::fromXContent),
// ML - Persistent action requests
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(StartDatafeedAction.TASK_NAME),
StartDatafeedAction.DatafeedParams::fromXContent),
@ -385,8 +368,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(LicensesMetaData.TYPE),
LicensesMetaData::fromXContent),
//rollup
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME),
parser -> RollupJob.fromXContent(parser)),
new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME), RollupJob::fromXContent),
new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(RollupJobStatus.NAME), RollupJobStatus::fromXContent)
);
}

View File

@ -2,7 +2,6 @@
"index_patterns": [ ".watcher-history-${xpack.watcher.template.version}*" ],
"order": 2147483647,
"settings": {
"xpack.watcher.template.version": "${xpack.watcher.template.version}",
"index.number_of_shards": 1,
"index.number_of_replicas": 0,
"index.auto_expand_replicas": "0-1",
@ -10,6 +9,9 @@
},
"mappings": {
"doc": {
"_meta": {
"watcher-history-version": "${xpack.watcher.template.version}"
},
"dynamic_templates": [
{
"disabled_payload_fields": {

View File

@ -410,7 +410,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityIndex.get());
final AnonymousUser anonymousUser = new AnonymousUser(settings);
final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore,
anonymousUser, securityIndex.get(), threadPool.getThreadContext());
anonymousUser, securityIndex.get(), threadPool);
Map<String, Realm.Factory> realmFactories = new HashMap<>(InternalRealms.getFactories(threadPool, resourceWatcherService,
getSslService(), nativeUsersStore, nativeRoleMappingStore, securityIndex.get()));
for (SecurityExtension extension : securityExtensions) {

View File

@ -93,9 +93,9 @@ public final class InternalRealms {
SecurityIndexManager securityIndex) {
Map<String, Realm.Factory> map = new HashMap<>();
map.put(FileRealmSettings.TYPE, config -> new FileRealm(config, resourceWatcherService));
map.put(FileRealmSettings.TYPE, config -> new FileRealm(config, resourceWatcherService, threadPool));
map.put(NativeRealmSettings.TYPE, config -> {
final NativeRealm nativeRealm = new NativeRealm(config, nativeUsersStore);
final NativeRealm nativeRealm = new NativeRealm(config, nativeUsersStore, threadPool);
securityIndex.addIndexStateListener(nativeRealm::onSecurityIndexStateChange);
return nativeRealm;
});

View File

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.security.authc.esnative;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
@ -24,8 +25,8 @@ public class NativeRealm extends CachingUsernamePasswordRealm {
private final NativeUsersStore userStore;
public NativeRealm(RealmConfig config, NativeUsersStore usersStore) {
super(NativeRealmSettings.TYPE, config);
public NativeRealm(RealmConfig config, NativeUsersStore usersStore, ThreadPool threadPool) {
super(NativeRealmSettings.TYPE, config, threadPool);
this.userStore = usersStore;
}

View File

@ -14,8 +14,8 @@ import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.core.security.SecurityField;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
@ -66,8 +66,8 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
private final SecurityIndexManager securityIndex;
public ReservedRealm(Environment env, Settings settings, NativeUsersStore nativeUsersStore, AnonymousUser anonymousUser,
SecurityIndexManager securityIndex, ThreadContext threadContext) {
super(TYPE, new RealmConfig(TYPE, Settings.EMPTY, settings, env, threadContext));
SecurityIndexManager securityIndex, ThreadPool threadPool) {
super(TYPE, new RealmConfig(TYPE, Settings.EMPTY, settings, env, threadPool.getThreadContext()), threadPool);
this.nativeUsersStore = nativeUsersStore;
this.realmEnabled = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings);
this.anonymousUser = anonymousUser;

View File

@ -6,6 +6,7 @@
package org.elasticsearch.xpack.security.authc.file;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.core.security.authc.AuthenticationResult;
import org.elasticsearch.xpack.core.security.authc.RealmConfig;
@ -21,13 +22,13 @@ public class FileRealm extends CachingUsernamePasswordRealm {
private final FileUserPasswdStore userPasswdStore;
private final FileUserRolesStore userRolesStore;
public FileRealm(RealmConfig config, ResourceWatcherService watcherService) {
this(config, new FileUserPasswdStore(config, watcherService), new FileUserRolesStore(config, watcherService));
public FileRealm(RealmConfig config, ResourceWatcherService watcherService, ThreadPool threadPool) {
this(config, new FileUserPasswdStore(config, watcherService), new FileUserRolesStore(config, watcherService), threadPool);
}
// pkg private for testing
FileRealm(RealmConfig config, FileUserPasswdStore userPasswdStore, FileUserRolesStore userRolesStore) {
super(FileRealmSettings.TYPE, config);
FileRealm(RealmConfig config, FileUserPasswdStore userPasswdStore, FileUserRolesStore userRolesStore, ThreadPool threadPool) {
super(FileRealmSettings.TYPE, config, threadPool);
this.userPasswdStore = userPasswdStore;
userPasswdStore.addListener(this::expireAll);
this.userRolesStore = userRolesStore;

View File

@ -67,7 +67,7 @@ public final class LdapRealm extends CachingUsernamePasswordRealm {
// pkg private for testing
LdapRealm(String type, RealmConfig config, SessionFactory sessionFactory,
UserRoleMapper roleMapper, ThreadPool threadPool) {
super(type, config);
super(type, config, threadPool);
this.sessionFactory = sessionFactory;
this.roleMapper = roleMapper;
this.threadPool = threadPool;
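Every realm constructor in the hunks above now threads a ThreadPool down to CachingUsernamePasswordRealm. A plausible reading, given the ListenableFuture introduced in this same commit and the single-concurrent-auth-per-realm change it supports, is that the base realm coalesces concurrent authentications for the same principal. A simplified sketch of that pattern, not the actual CachingUsernamePasswordRealm code:

import java.util.concurrent.ConcurrentHashMap;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.util.concurrent.ListenableFuture;
import org.elasticsearch.threadpool.ThreadPool;

// Simplified sketch: at most one in-flight authentication per user; concurrent
// callers attach to the same future instead of hitting the user store again.
class CoalescingAuthenticator {
    private final ConcurrentHashMap<String, ListenableFuture<Boolean>> inFlight = new ConcurrentHashMap<>();
    private final ThreadPool threadPool;

    CoalescingAuthenticator(ThreadPool threadPool) {
        this.threadPool = threadPool;
    }

    void authenticate(String user, ActionListener<Boolean> listener) {
        boolean[] startedHere = new boolean[1];
        ListenableFuture<Boolean> future = inFlight.computeIfAbsent(user, u -> {
            startedHere[0] = true;
            return new ListenableFuture<>();
        });
        future.addListener(listener, threadPool.generic());
        if (startedHere[0]) {
            doAuthenticate(user, ActionListener.wrap(result -> {
                inFlight.remove(user);
                future.onResponse(result);
            }, e -> {
                inFlight.remove(user);
                future.onFailure(e);
            }));
        }
    }

    // Placeholder for the real user-store lookup.
    void doAuthenticate(String user, ActionListener<Boolean> listener) {
        listener.onResponse(true);
    }
}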

Some files were not shown because too many files have changed in this diff.