Merge branch 'master' into index-lifecycle

Colin Goodheart-Smithe 2018-06-22 10:00:35 +01:00
commit d062556754
212 changed files with 2399 additions and 1272 deletions

View File

@@ -19,8 +19,8 @@
package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
@@ -30,7 +30,6 @@ import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
@@ -38,9 +37,8 @@ public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest,
new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));
@Inject
public TransportNoopBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters) {
super(settings, NoopBulkAction.NAME, threadPool, transportService, actionFilters, BulkRequest::new);
public TransportNoopBulkAction(Settings settings, TransportService transportService, ActionFilters actionFilters) {
super(settings, NoopBulkAction.NAME, transportService, actionFilters, BulkRequest::new);
}
@Override
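Most of the files touched by this merge repeat one mechanical change pulled in from master: `HandledTransportAction` (and the broadcast and tasks base classes) no longer take a `ThreadPool` in their constructors, so each subclass drops the parameter from its own constructor and `super(...)` call, keeping a private `threadPool` field only where the action itself still uses one. A minimal before/after sketch of the pattern; `TransportMyAction`, `MyAction`, and `MyRequest` are illustrative stand-ins, not names from this commit:

// Before: subclasses had to thread a ThreadPool through to the base class.
public TransportMyAction(Settings settings, ThreadPool threadPool, TransportService transportService,
                         ActionFilters actionFilters) {
    super(settings, MyAction.NAME, threadPool, transportService, actionFilters, MyRequest::new);
}

// After: the base constructor no longer takes a ThreadPool; actions that still
// need one (e.g. TransportBulkAction further down) keep it as a field of their own.
public TransportMyAction(Settings settings, TransportService transportService, ActionFilters actionFilters) {
    super(settings, MyAction.NAME, transportService, actionFilters, MyRequest::new);
}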

View File

@@ -27,23 +27,20 @@ import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.Collections;
public class TransportNoopSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
@Inject
public TransportNoopSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters) {
super(settings, NoopSearchAction.NAME, threadPool, transportService, actionFilters,
(Writeable.Reader<SearchRequest>) SearchRequest::new);
public TransportNoopSearchAction(Settings settings, TransportService transportService, ActionFilters actionFilters) {
super(settings, NoopSearchAction.NAME, transportService, actionFilters, (Writeable.Reader<SearchRequest>) SearchRequest::new);
}
@Override

View File

@@ -24,6 +24,8 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import java.io.IOException;
@@ -125,4 +127,37 @@ public final class IngestClient {
restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, options,
WritePipelineResponse::fromXContent, listener, emptySet());
}
/**
* Simulate a pipeline on a set of documents provided in the request
* <p>
* See
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html">
* Simulate Pipeline API on elastic.co</a>
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public SimulatePipelineResponse simulatePipeline(SimulatePipelineRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::simulatePipeline, options,
SimulatePipelineResponse::fromXContent, emptySet());
}
/**
* Asynchronously simulate a pipeline on a set of documents provided in the request
* <p>
* See
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html">
* Simulate Pipeline API on elastic.co</a>
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
*/
public void simulatePipelineAsync(SimulatePipelineRequest request,
RequestOptions options,
ActionListener<SimulatePipelineResponse> listener) {
restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::simulatePipeline, options,
SimulatePipelineResponse::fromXContent, listener, emptySet());
}
}
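A minimal usage sketch for the two new methods, assuming an initialized `RestHighLevelClient` named `client` (imports elided); the JSON body mirrors the one exercised in the documentation test further down:

String source =
    "{\"pipeline\":{\"description\":\"_description\"," +
    "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]}," +
    "\"docs\":[{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"bar\"}}]}";
SimulatePipelineRequest request = new SimulatePipelineRequest(
    new BytesArray(source.getBytes(StandardCharsets.UTF_8)), XContentType.JSON);
SimulatePipelineResponse response =
    client.ingest().simulatePipeline(request, RequestOptions.DEFAULT);
for (SimulateDocumentResult result : response.getResults()) {
    // Non-verbose requests yield SimulateDocumentBaseResult entries.
    SimulateDocumentBaseResult base = (SimulateDocumentBaseResult) result;
}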

View File

@@ -71,6 +71,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
@@ -886,6 +887,20 @@ final class RequestConverters {
return request;
}
static Request simulatePipeline(SimulatePipelineRequest simulatePipelineRequest) throws IOException {
EndpointBuilder builder = new EndpointBuilder().addPathPartAsIs("_ingest/pipeline");
if (simulatePipelineRequest.getId() != null && !simulatePipelineRequest.getId().isEmpty()) {
builder.addPathPart(simulatePipelineRequest.getId());
}
builder.addPathPartAsIs("_simulate");
String endpoint = builder.build();
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
Params params = new Params(request);
params.putParam("verbose", Boolean.toString(simulatePipelineRequest.isVerbose()));
request.setEntity(createEntity(simulatePipelineRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
}
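Depending on whether the request carries a pipeline id, the converter yields one of two endpoints. A short sketch, called from the same package as the unit test below does (`body` is assumed to be a `BytesReference` holding a simulate request):

SimulatePipelineRequest request = new SimulatePipelineRequest(body, XContentType.JSON);
Request noId = RequestConverters.simulatePipeline(request);   // POST /_ingest/pipeline/_simulate
request.setId("my-pipeline-id");
Request withId = RequestConverters.simulatePipeline(request); // POST /_ingest/pipeline/my-pipeline-id/_simulate
// Both requests carry the verbose flag as a query parameter and the body as the HTTP entity.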
static Request getAlias(GetAliasesRequest getAliasesRequest) {
String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();

View File

@@ -85,9 +85,7 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
}
}
protected static XContentBuilder buildRandomXContentPipeline() throws IOException {
XContentType xContentType = randomFrom(XContentType.values());
XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent());
protected static XContentBuilder buildRandomXContentPipeline(XContentBuilder pipelineBuilder) throws IOException {
pipelineBuilder.startObject();
{
pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors");
@@ -114,6 +112,12 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
return pipelineBuilder;
}
protected static XContentBuilder buildRandomXContentPipeline() throws IOException {
XContentType xContentType = randomFrom(XContentType.values());
XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent());
return buildRandomXContentPipeline(pipelineBuilder);
}
protected static void createPipeline(String pipelineId) throws IOException {
XContentBuilder builder = buildRandomXContentPipeline();
createPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(builder), builder.contentType()));

View File

@@ -23,12 +23,22 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.SimulateDocumentBaseResult;
import org.elasticsearch.action.ingest.SimulateDocumentResult;
import org.elasticsearch.action.ingest.SimulateDocumentVerboseResult;
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.ingest.PipelineConfiguration;
import java.io.IOException;
import java.util.List;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.core.IsInstanceOf.instanceOf;
public class IngestClientIT extends ESRestHighLevelClientTestCase {
@@ -80,4 +90,93 @@ public class IngestClientIT extends ESRestHighLevelClientTestCase {
execute(request, highLevelClient().ingest()::deletePipeline, highLevelClient().ingest()::deletePipelineAsync);
assertTrue(response.isAcknowledged());
}
public void testSimulatePipeline() throws IOException {
testSimulatePipeline(false, false);
}
public void testSimulatePipelineWithFailure() throws IOException {
testSimulatePipeline(false, true);
}
public void testSimulatePipelineVerbose() throws IOException {
testSimulatePipeline(true, false);
}
public void testSimulatePipelineVerboseWithFailure() throws IOException {
testSimulatePipeline(true, true);
}
private void testSimulatePipeline(boolean isVerbose,
boolean isFailure) throws IOException {
XContentType xContentType = randomFrom(XContentType.values());
XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
String rankValue = isFailure ? "non-int" : Integer.toString(1234);
builder.startObject();
{
builder.field("pipeline");
buildRandomXContentPipeline(builder);
builder.startArray("docs");
{
builder.startObject()
.field("_index", "index")
.field("_type", "doc")
.field("_id", "doc_" + 1)
.startObject("_source").field("foo", "rab_" + 1).field("rank", rankValue).endObject()
.endObject();
}
builder.endArray();
}
builder.endObject();
SimulatePipelineRequest request = new SimulatePipelineRequest(
BytesReference.bytes(builder),
builder.contentType()
);
request.setVerbose(isVerbose);
SimulatePipelineResponse response =
execute(request, highLevelClient().ingest()::simulatePipeline, highLevelClient().ingest()::simulatePipelineAsync);
List<SimulateDocumentResult> results = response.getResults();
assertEquals(1, results.size());
if (isVerbose) {
assertThat(results.get(0), instanceOf(SimulateDocumentVerboseResult.class));
SimulateDocumentVerboseResult verboseResult = (SimulateDocumentVerboseResult) results.get(0);
assertEquals(2, verboseResult.getProcessorResults().size());
if (isFailure) {
assertNotNull(verboseResult.getProcessorResults().get(1).getFailure());
assertThat(verboseResult.getProcessorResults().get(1).getFailure().getMessage(),
containsString("unable to convert [non-int] to integer"));
} else {
assertEquals(
verboseResult.getProcessorResults().get(0).getIngestDocument()
.getFieldValue("foo", String.class),
"bar"
);
assertEquals(
Integer.valueOf(1234),
verboseResult.getProcessorResults().get(1).getIngestDocument()
.getFieldValue("rank", Integer.class)
);
}
} else {
assertThat(results.get(0), instanceOf(SimulateDocumentBaseResult.class));
SimulateDocumentBaseResult baseResult = (SimulateDocumentBaseResult)results.get(0);
if (isFailure) {
assertNotNull(baseResult.getFailure());
assertThat(baseResult.getFailure().getMessage(),
containsString("unable to convert [non-int] to integer"));
} else {
assertNotNull(baseResult.getIngestDocument());
assertEquals(
baseResult.getIngestDocument().getFieldValue("foo", String.class),
"bar"
);
assertEquals(
Integer.valueOf(1234),
baseResult.getIngestDocument()
.getFieldValue("rank", Integer.class)
);
}
}
}
}

View File

@@ -74,6 +74,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
@@ -1534,6 +1535,34 @@ public class RequestConvertersTests extends ESTestCase {
assertEquals(expectedParams, expectedRequest.getParameters());
}
public void testSimulatePipeline() throws IOException {
String pipelineId = randomBoolean() ? "some_pipeline_id" : null;
boolean verbose = randomBoolean();
String json = "{\"pipeline\":{" +
"\"description\":\"_description\"," +
"\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]}," +
"\"docs\":[{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}]}";
SimulatePipelineRequest request = new SimulatePipelineRequest(
new BytesArray(json.getBytes(StandardCharsets.UTF_8)),
XContentType.JSON
);
request.setId(pipelineId);
request.setVerbose(verbose);
Map<String, String> expectedParams = new HashMap<>();
expectedParams.put("verbose", Boolean.toString(verbose));
Request expectedRequest = RequestConverters.simulatePipeline(request);
StringJoiner endpoint = new StringJoiner("/", "/", "");
endpoint.add("_ingest/pipeline");
if (pipelineId != null && !pipelineId.isEmpty())
endpoint.add(pipelineId);
endpoint.add("_simulate");
assertEquals(endpoint.toString(), expectedRequest.getEndpoint());
assertEquals(HttpPost.METHOD_NAME, expectedRequest.getMethod());
assertEquals(expectedParams, expectedRequest.getParameters());
assertToXContentBody(request, expectedRequest.getEntity());
}
public void testClusterHealth() {
ClusterHealthRequest healthRequest = new ClusterHealthRequest();
Map<String, String> expectedParams = new HashMap<>();

View File

@@ -25,6 +25,12 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.SimulateDocumentBaseResult;
import org.elasticsearch.action.ingest.SimulateDocumentResult;
import org.elasticsearch.action.ingest.SimulateDocumentVerboseResult;
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.SimulateProcessorResult;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
@@ -277,4 +283,109 @@ public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testSimulatePipeline() throws IOException {
RestHighLevelClient client = highLevelClient();
{
// tag::simulate-pipeline-request
String source =
"{\"" +
"pipeline\":{" +
"\"description\":\"_description\"," +
"\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]" +
"}," +
"\"docs\":[" +
"{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"bar\"}}," +
"{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}" +
"]" +
"}";
SimulatePipelineRequest request = new SimulatePipelineRequest(
new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <1>
XContentType.JSON // <2>
);
// end::simulate-pipeline-request
// tag::simulate-pipeline-request-pipeline-id
request.setId("my-pipeline-id"); // <1>
// end::simulate-pipeline-request-pipeline-id
// For testing we set this back to null
request.setId(null);
// tag::simulate-pipeline-request-verbose
request.setVerbose(true); // <1>
// end::simulate-pipeline-request-verbose
// tag::simulate-pipeline-execute
SimulatePipelineResponse response = client.ingest().simulatePipeline(request, RequestOptions.DEFAULT); // <1>
// end::simulate-pipeline-execute
// tag::simulate-pipeline-response
for (SimulateDocumentResult result: response.getResults()) { // <1>
if (request.isVerbose()) {
assert result instanceof SimulateDocumentVerboseResult;
SimulateDocumentVerboseResult verboseResult = (SimulateDocumentVerboseResult)result; // <2>
for (SimulateProcessorResult processorResult: verboseResult.getProcessorResults()) { // <3>
processorResult.getIngestDocument(); // <4>
processorResult.getFailure(); // <5>
}
} else {
assert result instanceof SimulateDocumentBaseResult;
SimulateDocumentBaseResult baseResult = (SimulateDocumentBaseResult)result; // <6>
baseResult.getIngestDocument(); // <7>
baseResult.getFailure(); // <8>
}
}
// end::simulate-pipeline-response
assert(response.getResults().size() > 0);
}
}
public void testSimulatePipelineAsync() throws Exception {
RestHighLevelClient client = highLevelClient();
{
String source =
"{\"" +
"pipeline\":{" +
"\"description\":\"_description\"," +
"\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]" +
"}," +
"\"docs\":[" +
"{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"bar\"}}," +
"{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}" +
"]" +
"}";
SimulatePipelineRequest request = new SimulatePipelineRequest(
new BytesArray(source.getBytes(StandardCharsets.UTF_8)),
XContentType.JSON
);
// tag::simulate-pipeline-execute-listener
ActionListener<SimulatePipelineResponse> listener =
new ActionListener<SimulatePipelineResponse>() {
@Override
public void onResponse(SimulatePipelineResponse response) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::simulate-pipeline-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::simulate-pipeline-execute-async
client.ingest().simulatePipelineAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::simulate-pipeline-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
}

View File

@@ -42,9 +42,7 @@ import java.util.concurrent.TimeUnit;
import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode;
import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode;
import static org.hamcrest.Matchers.startsWith;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -216,8 +214,10 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
restClient.performRequest(request);
fail("expected to fail to connect");
} catch (ConnectException e) {
// This is different in windows and linux but this matches both.
assertThat(e.getMessage(), startsWith("Connection refused"));
// Windows isn't consistent here. Sometimes the message is even null!
if (false == System.getProperty("os.name").startsWith("Windows")) {
assertEquals("Connection refused", e.getMessage());
}
}
} else {
Response response = restClient.performRequest(request);

View File

@@ -0,0 +1,90 @@
[[java-rest-high-ingest-simulate-pipeline]]
=== Simulate Pipeline API
[[java-rest-high-ingest-simulate-pipeline-request]]
==== Simulate Pipeline Request
A `SimulatePipelineRequest` requires a source and an `XContentType`. The source is
the request body. See the https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html[docs]
for more details on the request body.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request]
--------------------------------------------------
<1> The request body as a `BytesArray`.
<2> The `XContentType` for the request body supplied above.
==== Optional arguments
The following arguments can optionally be provided:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request-pipeline-id]
--------------------------------------------------
<1> You can either specify an existing pipeline to execute against the provided documents, or supply a
pipeline definition in the body of the request. This option sets the id for an existing pipeline.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request-verbose]
--------------------------------------------------
<1> To see the intermediate results of each processor in the simulate request, you can add the verbose parameter
to the request.
[[java-rest-high-ingest-simulate-pipeline-sync]]
==== Synchronous Execution
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute]
--------------------------------------------------
<1> Execute the request and get back the response in a `SimulatePipelineResponse` object.
[[java-rest-high-ingest-simulate-pipeline-async]]
==== Asynchronous Execution
The asynchronous execution of a simulate pipeline request requires both the `SimulatePipelineRequest`
instance and an `ActionListener` instance to be passed to the asynchronous
method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute-async]
--------------------------------------------------
<1> The `SimulatePipelineRequest` to execute and the `ActionListener` to use when
the execution completes.
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.
A typical listener for `SimulatePipelineResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument
[[java-rest-high-ingest-simulate-pipeline-response]]
==== Simulate Pipeline Response
The returned `SimulatePipelineResponse` lets you retrieve information about the
executed operation as follows:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-response]
--------------------------------------------------
<1> Get the results for each of the provided documents as a `List<SimulateDocumentResult>`.
<2> If the request was in verbose mode, cast each result to `SimulateDocumentVerboseResult`.
<3> Check the result after each processor is applied.
<4> Get the ingest document for the result obtained in 3.
<5> Or get the failure for the result obtained in 3.
<6> Get the result as `SimulateDocumentBaseResult` if the result was not verbose.
<7> Get the ingest document for the result obtained in 6.
<8> Or get the failure for the result obtained in 6.

View File

@@ -123,10 +123,12 @@ The Java High Level REST Client supports the following Ingest APIs:
* <<java-rest-high-ingest-put-pipeline>>
* <<java-rest-high-ingest-get-pipeline>>
* <<java-rest-high-ingest-delete-pipeline>>
* <<java-rest-high-ingest-simulate-pipeline>>
include::ingest/put_pipeline.asciidoc[]
include::ingest/get_pipeline.asciidoc[]
include::ingest/delete_pipeline.asciidoc[]
include::ingest/simulate_pipeline.asciidoc[]
== Snapshot APIs

View File

@@ -41,7 +41,6 @@ import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestBuilderListener;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@@ -114,9 +113,8 @@ public class GrokProcessorGetAction extends Action<GrokProcessorGetAction.Respon
public static class TransportAction extends HandledTransportAction<Request, Response> {
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters) {
super(settings, NAME, threadPool, transportService, actionFilters, Request::new);
public TransportAction(Settings settings, TransportService transportService, ActionFilters actionFilters) {
super(settings, NAME, transportService, actionFilters, Request::new);
}
@Override

View File

@@ -30,7 +30,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
@@ -45,10 +44,10 @@ public class TransportMultiSearchTemplateAction extends HandledTransportAction<M
private final NodeClient client;
@Inject
public TransportMultiSearchTemplateAction(Settings settings, ThreadPool threadPool, TransportService transportService,
public TransportMultiSearchTemplateAction(Settings settings, TransportService transportService,
ActionFilters actionFilters, ScriptService scriptService,
NamedXContentRegistry xContentRegistry, NodeClient client) {
super(settings, MultiSearchTemplateAction.NAME, threadPool, transportService, actionFilters, MultiSearchTemplateRequest::new);
super(settings, MultiSearchTemplateAction.NAME, transportService, actionFilters, MultiSearchTemplateRequest::new);
this.scriptService = scriptService;
this.xContentRegistry = xContentRegistry;
this.client = client;

View File

@@ -38,7 +38,6 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.script.TemplateScript;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@@ -54,11 +53,10 @@ public class TransportSearchTemplateAction extends HandledTransportAction<Search
private final NodeClient client;
@Inject
public TransportSearchTemplateAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, ScriptService scriptService, NamedXContentRegistry xContentRegistry,
NodeClient client) {
super(settings, SearchTemplateAction.NAME, threadPool, transportService, actionFilters,
(Supplier<SearchTemplateRequest>) SearchTemplateRequest::new);
public TransportSearchTemplateAction(Settings settings, TransportService transportService, ActionFilters actionFilters,
ScriptService scriptService, NamedXContentRegistry xContentRegistry, NodeClient client) {
super(settings, SearchTemplateAction.NAME, transportService, actionFilters,
(Supplier<SearchTemplateRequest>) SearchTemplateRequest::new);
this.scriptService = scriptService;
this.xContentRegistry = xContentRegistry;
this.client = client;

View File

@@ -48,7 +48,6 @@ import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@@ -280,9 +279,9 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
private final ScriptService scriptService;
@Inject
public TransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
public TransportAction(Settings settings, TransportService transportService,
ActionFilters actionFilters, ScriptService scriptService) {
super(settings, NAME, threadPool, transportService, actionFilters, Request::new);
super(settings, NAME, transportService, actionFilters, Request::new);
this.scriptService = scriptService;
}
@Override

View File

@@ -40,7 +40,6 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.TemplateScript;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@@ -73,10 +72,10 @@ public class TransportRankEvalAction extends HandledTransportAction<RankEvalRequ
private final NamedXContentRegistry namedXContentRegistry;
@Inject
public TransportRankEvalAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, Client client,
public TransportRankEvalAction(Settings settings, ActionFilters actionFilters, Client client,
TransportService transportService, ScriptService scriptService,
NamedXContentRegistry namedXContentRegistry) {
super(settings, RankEvalAction.NAME, threadPool, transportService, actionFilters,
super(settings, RankEvalAction.NAME, transportService, actionFilters,
(Writeable.Reader<RankEvalRequest>) RankEvalRequest::new);
this.scriptService = scriptService;
this.namedXContentRegistry = namedXContentRegistry;

View File

@@ -19,8 +19,6 @@
package org.elasticsearch.index.reindex;
import java.util.function.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
@@ -35,7 +33,11 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.function.Supplier;
public class TransportDeleteByQueryAction extends HandledTransportAction<DeleteByQueryRequest, BulkByScrollResponse> {
private final ThreadPool threadPool;
private final Client client;
private final ScriptService scriptService;
private final ClusterService clusterService;
@@ -43,8 +45,9 @@ public class TransportDeleteByQueryAction extends HandledTransportAction<DeleteB
@Inject
public TransportDeleteByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, Client client,
TransportService transportService, ScriptService scriptService, ClusterService clusterService) {
super(settings, DeleteByQueryAction.NAME, threadPool, transportService, actionFilters,
super(settings, DeleteByQueryAction.NAME, transportService, actionFilters,
(Supplier<DeleteByQueryRequest>) DeleteByQueryRequest::new);
this.threadPool = threadPool;
this.client = client;
this.scriptService = scriptService;
this.clusterService = clusterService;

View File

@@ -92,6 +92,7 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
public static final Setting<List<String>> REMOTE_CLUSTER_WHITELIST =
Setting.listSetting("reindex.remote.whitelist", emptyList(), Function.identity(), Property.NodeScope);
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final ScriptService scriptService;
private final AutoCreateIndex autoCreateIndex;
@@ -103,8 +104,8 @@
public TransportReindexAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService,
AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) {
super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters,
ReindexRequest::new);
super(settings, ReindexAction.NAME, transportService, actionFilters, ReindexRequest::new);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.scriptService = scriptService;
this.autoCreateIndex = autoCreateIndex;

View File

@@ -43,9 +43,9 @@ public class TransportRethrottleAction extends TransportTasksAction<BulkByScroll
private final Client client;
@Inject
public TransportRethrottleAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportRethrottleAction(Settings settings, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, Client client) {
super(settings, RethrottleAction.NAME, threadPool, clusterService, transportService, actionFilters,
super(settings, RethrottleAction.NAME, clusterService, transportService, actionFilters,
RethrottleRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
this.client = client;
}

View File

@@ -46,6 +46,8 @@ import java.util.function.BiFunction;
import java.util.function.Supplier;
public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateByQueryRequest, BulkByScrollResponse> {
private final ThreadPool threadPool;
private final Client client;
private final ScriptService scriptService;
private final ClusterService clusterService;
@@ -53,8 +55,9 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
@Inject
public TransportUpdateByQueryAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, Client client,
TransportService transportService, ScriptService scriptService, ClusterService clusterService) {
super(settings, UpdateByQueryAction.NAME, threadPool, transportService, actionFilters,
super(settings, UpdateByQueryAction.NAME, transportService, actionFilters,
(Supplier<UpdateByQueryRequest>) UpdateByQueryRequest::new);
this.threadPool = threadPool;
this.client = client;
this.scriptService = scriptService;
this.clusterService = clusterService;

View File

@@ -13,3 +13,24 @@
indices.get_mapping:
index: test_index
---
"Index missing, ignore_unavailable=true":
- skip:
version: " - 6.99.99"
reason: ignore_unavailable was ignored in previous versions
- do:
indices.get_mapping:
index: test_index
ignore_unavailable: true
- match: { '': {} }
---
"Index missing, ignore_unavailable=true, allow_no_indices=false":
- do:
catch: missing
indices.get_mapping:
index: test_index
ignore_unavailable: true
allow_no_indices: false
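For illustration, the same behaviour sketched against the low-level Java client, assuming a `RestClient` named `restClient` and that `test_index` does not exist:

Request lenient = new Request("GET", "/test_index/_mapping");
lenient.addParameter("ignore_unavailable", "true");
// allow_no_indices defaults to true, so this returns 200 with an empty object: {}
Response ok = restClient.performRequest(lenient);

Request strict = new Request("GET", "/test_index/_mapping");
strict.addParameter("ignore_unavailable", "true");
strict.addParameter("allow_no_indices", "false");
// With allow_no_indices=false the missing index is an error again:
// performRequest throws a ResponseException carrying the 404.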

View File

@@ -94,12 +94,26 @@ setup:
---
"Get test-* with wildcard_expansion=none":
- skip:
version: " - 6.99.99"
reason: allow_no_indices (defaults to true) was ignored in previous versions
- do:
indices.get_mapping:
index: test-x*
expand_wildcards: none
- match: { '': {} }
---
"Get test-* with wildcard_expansion=none allow_no_indices=false":
- skip:
version: " - 6.99.99"
reason: allow_no_indices was ignored in previous versions
- do:
catch: missing
indices.get_mapping:
index: test-x*
expand_wildcards: none
allow_no_indices: false
---
"Get test-* with wildcard_expansion=open,closed":

View File

@@ -62,9 +62,9 @@ public class TransportCancelTasksAction extends TransportTasksAction<Cancellable
public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban";
@Inject
public TransportCancelTasksAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportCancelTasksAction(Settings settings, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters) {
super(settings, CancelTasksAction.NAME, threadPool, clusterService, transportService, actionFilters,
super(settings, CancelTasksAction.NAME, clusterService, transportService, actionFilters,
CancelTasksRequest::new, CancelTasksResponse::new, ThreadPool.Names.MANAGEMENT);
transportService.registerRequestHandler(BAN_PARENT_ACTION_NAME, BanParentTaskRequest::new, ThreadPool.Names.SAME, new
BanParentRequestHandler());

View File

@@ -64,6 +64,7 @@ import static org.elasticsearch.action.admin.cluster.node.tasks.list.TransportLi
* </ul>
*/
public class TransportGetTaskAction extends HandledTransportAction<GetTaskRequest, GetTaskResponse> {
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final TransportService transportService;
private final Client client;
@@ -72,7 +73,8 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
@Inject
public TransportGetTaskAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
ClusterService clusterService, Client client, NamedXContentRegistry xContentRegistry) {
super(settings, GetTaskAction.NAME, threadPool, transportService, actionFilters, GetTaskRequest::new);
super(settings, GetTaskAction.NAME, transportService, actionFilters, GetTaskRequest::new);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.transportService = transportService;
this.client = client;

View File

@@ -51,9 +51,9 @@ public class TransportListTasksAction extends TransportTasksAction<Task, ListTas
private static final TimeValue DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT = timeValueSeconds(30);
@Inject
public TransportListTasksAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportListTasksAction(Settings settings, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters) {
super(settings, ListTasksAction.NAME, threadPool, clusterService, transportService, actionFilters,
super(settings, ListTasksAction.NAME, clusterService, transportService, actionFilters,
ListTasksRequest::new, ListTasksResponse::new, ThreadPool.Names.MANAGEMENT);
}

View File

@@ -28,7 +28,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import static java.util.stream.Collectors.toList;
@@ -38,9 +37,9 @@ public final class TransportRemoteInfoAction extends HandledTransportAction<Remo
private final RemoteClusterService remoteClusterService;
@Inject
public TransportRemoteInfoAction(Settings settings, ThreadPool threadPool, TransportService transportService,
public TransportRemoteInfoAction(Settings settings, TransportService transportService,
ActionFilters actionFilters, SearchTransportService searchTransportService) {
super(settings, RemoteInfoAction.NAME, threadPool, transportService, actionFilters,
super(settings, RemoteInfoAction.NAME, transportService, actionFilters,
(Supplier<RemoteInfoRequest>) RemoteInfoRequest::new);
this.remoteClusterService = searchTransportService.getRemoteClusterService();
}

View File

@@ -49,10 +49,10 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
private final IndicesService indicesService;
@Inject
public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportClearIndicesCacheAction(Settings settings, ClusterService clusterService,
TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters,
super(settings, ClearIndicesCacheAction.NAME, clusterService, transportService, actionFilters,
indexNameExpressionResolver, ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT, false);
this.indicesService = indicesService;
}

View File

@@ -28,7 +28,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.List;
@@ -39,11 +38,10 @@ import java.util.List;
public class TransportFlushAction extends TransportBroadcastReplicationAction<FlushRequest, FlushResponse, ShardFlushRequest, ReplicationResponse> {
@Inject
public TransportFlushAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
public TransportFlushAction(Settings settings, ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
TransportShardFlushAction replicatedFlushAction) {
super(FlushAction.NAME, FlushRequest::new, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedFlushAction);
super(FlushAction.NAME, FlushRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedFlushAction);
}
@Override

View File

@@ -27,7 +27,6 @@ import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
/**
@@ -38,9 +37,9 @@ public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlu
SyncedFlushService syncedFlushService;
@Inject
public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool, TransportService transportService,
public TransportSyncedFlushAction(Settings settings, TransportService transportService,
ActionFilters actionFilters, SyncedFlushService syncedFlushService) {
super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters,
super(settings, SyncedFlushAction.NAME, transportService, actionFilters,
(Supplier<SyncedFlushRequest>) SyncedFlushRequest::new);
this.syncedFlushService = syncedFlushService;
}

View File

@@ -48,10 +48,10 @@ public class TransportForceMergeAction extends TransportBroadcastByNodeAction<Fo
private final IndicesService indicesService;
@Inject
public TransportForceMergeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportForceMergeAction(Settings settings, ClusterService clusterService,
TransportService transportService, IndicesService indicesService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ForceMergeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
super(settings, ForceMergeAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
ForceMergeRequest::new, ThreadPool.Names.FORCE_MERGE);
this.indicesService = indicesService;
}

View File

@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.HashMap;
@@ -45,9 +44,9 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction<GetF
@Inject
public TransportGetFieldMappingsAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, TransportGetFieldMappingsIndexAction shardAction,
TransportGetFieldMappingsIndexAction shardAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, GetFieldMappingsAction.NAME, threadPool, transportService, actionFilters, GetFieldMappingsRequest::new);
super(settings, GetFieldMappingsAction.NAME, transportService, actionFilters, GetFieldMappingsRequest::new);
this.clusterService = clusterService;
this.shardAction = shardAction;
this.indexNameExpressionResolver = indexNameExpressionResolver;

View File

@@ -54,10 +54,10 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
private final IndicesService indicesService;
@Inject
public TransportRecoveryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportRecoveryAction(Settings settings, ClusterService clusterService,
TransportService transportService, IndicesService indicesService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, RecoveryAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
super(settings, RecoveryAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
RecoveryRequest::new, ThreadPool.Names.MANAGEMENT);
this.indicesService = indicesService;
}

View File

@@ -30,7 +30,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.List;
@@ -41,11 +40,11 @@ import java.util.List;
public class TransportRefreshAction extends TransportBroadcastReplicationAction<RefreshRequest, RefreshResponse, BasicReplicationRequest, ReplicationResponse> {
@Inject
public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportRefreshAction(Settings settings, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
TransportShardRefreshAction shardRefreshAction) {
super(RefreshAction.NAME, RefreshRequest::new, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, shardRefreshAction);
super(RefreshAction.NAME, RefreshRequest::new, settings, clusterService, transportService, actionFilters, indexNameExpressionResolver, shardRefreshAction);
}
@Override

View File

@@ -46,9 +46,9 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
private final IndicesService indicesService;
@Inject
public TransportIndicesSegmentsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
public TransportIndicesSegmentsAction(Settings settings, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, IndicesSegmentsAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
super(settings, IndicesSegmentsAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
IndicesSegmentsRequest::new, ThreadPool.Names.MANAGEMENT);
this.indicesService = indicesService;
}

View File

@@ -47,10 +47,10 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
private final IndicesService indicesService;
@Inject
public TransportIndicesStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportIndicesStatsAction(Settings settings, ClusterService clusterService,
TransportService transportService, IndicesService indicesService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, IndicesStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
super(settings, IndicesStatsAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
IndicesStatsRequest::new, ThreadPool.Names.MANAGEMENT);
this.indicesService = indicesService;
}

View File

@@ -48,9 +48,9 @@ public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction
private final IndicesService indicesService;
@Inject
public TransportUpgradeStatusAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
public TransportUpgradeStatusAction(Settings settings, ClusterService clusterService, TransportService transportService,
IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, UpgradeStatusAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
super(settings, UpgradeStatusAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
UpgradeStatusRequest::new, ThreadPool.Names.MANAGEMENT);
this.indicesService = indicesService;
}

View File

@@ -62,10 +62,10 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
private final NodeClient client;
@Inject
public TransportUpgradeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportUpgradeAction(Settings settings, ClusterService clusterService,
TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, NodeClient client) {
super(settings, UpgradeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpgradeRequest::new, ThreadPool.Names.FORCE_MERGE);
super(settings, UpgradeAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpgradeRequest::new, ThreadPool.Names.FORCE_MERGE);
this.indicesService = indicesService;
this.client = client;
}

View File

@@ -64,10 +64,10 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
private final SearchService searchService;
@Inject
public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportValidateQueryAction(Settings settings, ClusterService clusterService,
TransportService transportService, SearchService searchService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
super(settings, ValidateQueryAction.NAME, clusterService, transportService, actionFilters,
indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
this.searchService = searchService;
}

View File

@@ -84,6 +84,7 @@ import static java.util.Collections.emptyMap;
*/
public class TransportBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
private final ThreadPool threadPool;
private final AutoCreateIndex autoCreateIndex;
private final ClusterService clusterService;
private final IngestService ingestService;
@@ -108,8 +109,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
TransportShardBulkAction shardBulkAction, NodeClient client,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, BulkRequest::new);
super(settings, BulkAction.NAME, transportService, actionFilters, BulkRequest::new);
Objects.requireNonNull(relativeTimeProvider);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.ingestService = ingestService;
this.shardBulkAction = shardBulkAction;

View File

@@ -76,6 +76,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
private static final Logger logger = ESLoggerFactory.getLogger(TransportShardBulkAction.class);
private final ThreadPool threadPool;
private final UpdateHelper updateHelper;
private final MappingUpdatedAction mappingUpdatedAction;
@@ -86,6 +87,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters,
indexNameExpressionResolver, BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.WRITE);
this.threadPool = threadPool;
this.updateHelper = updateHelper;
this.mappingUpdatedAction = mappingUpdatedAction;
}

View File

@@ -43,6 +43,7 @@ import java.util.List;
import java.util.Map;
public class TransportFieldCapabilitiesAction extends HandledTransportAction<FieldCapabilitiesRequest, FieldCapabilitiesResponse> {
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final TransportFieldCapabilitiesIndexAction shardAction;
private final RemoteClusterService remoteClusterService;
@@ -53,7 +54,8 @@ public class TransportFieldCapabilitiesAction extends HandledTransportAction<Fie
ClusterService clusterService, ThreadPool threadPool,
TransportFieldCapabilitiesIndexAction shardAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, FieldCapabilitiesAction.NAME, threadPool, transportService, actionFilters, FieldCapabilitiesRequest::new);
super(settings, FieldCapabilitiesAction.NAME, transportService, actionFilters, FieldCapabilitiesRequest::new);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.remoteClusterService = transportService.getRemoteClusterService();
this.shardAction = shardAction;

View File

@@ -30,7 +30,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.HashMap;
@@ -44,10 +43,10 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportMultiGetAction(Settings settings, ThreadPool threadPool, TransportService transportService,
public TransportMultiGetAction(Settings settings, TransportService transportService,
ClusterService clusterService, TransportShardMultiGetAction shardAction,
ActionFilters actionFilters, IndexNameExpressionResolver resolver) {
super(settings, MultiGetAction.NAME, threadPool, transportService, actionFilters, MultiGetRequest::new);
super(settings, MultiGetAction.NAME, transportService, actionFilters, MultiGetRequest::new);
this.clusterService = clusterService;
this.shardAction = shardAction;
this.indexNameExpressionResolver = resolver;

View File

@@ -19,13 +19,18 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.ingest.IngestDocument;
import java.io.IOException;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
/**
* Holds the end result of what a pipeline did to a sample document provided via the simulate API.
*/
@@ -33,6 +38,33 @@ public final class SimulateDocumentBaseResult implements SimulateDocumentResult
private final WriteableIngestDocument ingestDocument;
private final Exception failure;
public static final ConstructingObjectParser<SimulateDocumentBaseResult, Void> PARSER =
new ConstructingObjectParser<>(
"simulate_document_base_result",
true,
a -> {
if (a[1] == null) {
assert a[0] != null;
return new SimulateDocumentBaseResult(((WriteableIngestDocument)a[0]).getIngestDocument());
} else {
assert a[0] == null;
return new SimulateDocumentBaseResult((ElasticsearchException)a[1]);
}
}
);
static {
PARSER.declareObject(
optionalConstructorArg(),
WriteableIngestDocument.INGEST_DOC_PARSER,
new ParseField(WriteableIngestDocument.DOC_FIELD)
);
PARSER.declareObject(
optionalConstructorArg(),
(p, c) -> ElasticsearchException.fromXContent(p),
new ParseField("error")
);
}
public SimulateDocumentBaseResult(IngestDocument ingestDocument) {
this.ingestDocument = new WriteableIngestDocument(ingestDocument);
failure = null;
@ -89,4 +121,8 @@ public final class SimulateDocumentBaseResult implements SimulateDocumentResult
builder.endObject();
return builder;
}
public static SimulateDocumentBaseResult fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
}
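
A minimal usage sketch, not part of this commit: the new PARSER lets a simulate result be read back from JSON. The document values below are invented, and the snippet assumes a calling context that declares IOException.

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

String json = "{\"doc\":{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"1\","
    + "\"_source\":{\"foo\":\"bar\"},\"_ingest\":{\"timestamp\":\"2018-06-22T10:00:35Z\"}}}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    // a "doc" entry with no "error" entry parses back as a successful result
    SimulateDocumentBaseResult result = SimulateDocumentBaseResult.fromXContent(parser);
    assert result.getFailure() == null;
    assert "bar".equals(result.getIngestDocument().getFieldValue("foo", String.class));
}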

View File

@ -18,21 +18,38 @@
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
/**
 * Holds the result of what a pipeline did to a sample document via the simulate api; unlike {@link SimulateDocumentBaseResult},
 * this result class holds the intermediate result that each processor applied to the sample document.
*/
public final class SimulateDocumentVerboseResult implements SimulateDocumentResult {
public static final String PROCESSOR_RESULT_FIELD = "processor_results";
private final List<SimulateProcessorResult> processorResults;
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<SimulateDocumentVerboseResult, Void> PARSER =
new ConstructingObjectParser<>(
"simulate_document_verbose_result",
true,
a -> new SimulateDocumentVerboseResult((List<SimulateProcessorResult>)a[0])
);
static {
PARSER.declareObjectArray(constructorArg(), SimulateProcessorResult.PARSER, new ParseField(PROCESSOR_RESULT_FIELD));
}
public SimulateDocumentVerboseResult(List<SimulateProcessorResult> processorResults) {
this.processorResults = processorResults;
}
@ -63,7 +80,7 @@ public final class SimulateDocumentVerboseResult implements SimulateDocumentResu
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startArray("processor_results");
builder.startArray(PROCESSOR_RESULT_FIELD);
for (SimulateProcessorResult processorResult : processorResults) {
processorResult.toXContent(builder, params);
}
@ -71,4 +88,8 @@ public final class SimulateDocumentVerboseResult implements SimulateDocumentResu
builder.endObject();
return builder;
}
public static SimulateDocumentVerboseResult fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
}
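
With the same parser setup as the earlier sketch, a verbose result is just an array of per-processor results under "processor_results"; the tag value here is invented.

String json = "{\"processor_results\":[{\"tag\":\"set-1\",\"doc\":{\"_index\":\"index\","
    + "\"_type\":\"_doc\",\"_id\":\"1\",\"_source\":{\"foo\":\"bar\"},"
    + "\"_ingest\":{\"timestamp\":\"2018-06-22T10:00:35Z\"}}}]}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    SimulateDocumentVerboseResult result = SimulateDocumentVerboseResult.fromXContent(parser);
    assert result.getProcessorResults().size() == 1;
    assert "set-1".equals(result.getProcessorResults().get(0).getProcessorTag());
}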

View File

@ -25,6 +25,8 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
@ -42,7 +44,7 @@ import java.util.Objects;
import static org.elasticsearch.ingest.IngestDocument.MetaData;
public class SimulatePipelineRequest extends ActionRequest {
public class SimulatePipelineRequest extends ActionRequest implements ToXContentObject {
private String id;
private boolean verbose;
@ -126,6 +128,12 @@ public class SimulatePipelineRequest extends ActionRequest {
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.rawValue(source.streamInput(), xContentType);
return builder;
}
public static final class Fields {
static final String PIPELINE = "pipeline";
static final String DOCS = "docs";
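
A hypothetical round-trip sketch: since the request now implements ToXContentObject, its raw source bytes are written back out unchanged. The two-argument constructor taking the source bytes and their content type is assumed here.

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;

BytesReference source = new BytesArray("{\"pipeline\":{\"processors\":[]},\"docs\":[]}");
SimulatePipelineRequest request = new SimulatePipelineRequest(source, XContentType.JSON);
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
    // rawValue streams the stored source bytes straight into the builder
    request.toXContent(builder, ToXContent.EMPTY_PARAMS);
}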

View File

@ -19,22 +19,90 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
public class SimulatePipelineResponse extends ActionResponse implements ToXContentObject {
private String pipelineId;
private boolean verbose;
private List<SimulateDocumentResult> results;
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<SimulatePipelineResponse, Void> PARSER =
new ConstructingObjectParser<>(
"simulate_pipeline_response",
true,
a -> {
List<SimulateDocumentResult> results = (List<SimulateDocumentResult>)a[0];
boolean verbose = false;
                if (results.size() > 0 && results.get(0) instanceof SimulateDocumentVerboseResult) {
                    verbose = true;
                }
return new SimulatePipelineResponse(null, verbose, results);
}
);
static {
PARSER.declareObjectArray(
constructorArg(),
(parser, context) -> {
Token token = parser.currentToken();
ensureExpectedToken(Token.START_OBJECT, token, parser::getTokenLocation);
SimulateDocumentResult result = null;
while ((token = parser.nextToken()) != Token.END_OBJECT) {
                    ensureExpectedToken(Token.FIELD_NAME, token, parser::getTokenLocation);
String fieldName = parser.currentName();
token = parser.nextToken();
if (token == Token.START_ARRAY) {
if (fieldName.equals(SimulateDocumentVerboseResult.PROCESSOR_RESULT_FIELD)) {
List<SimulateProcessorResult> results = new ArrayList<>();
while ((token = parser.nextToken()) == Token.START_OBJECT) {
results.add(SimulateProcessorResult.fromXContent(parser));
}
ensureExpectedToken(Token.END_ARRAY, token, parser::getTokenLocation);
result = new SimulateDocumentVerboseResult(results);
} else {
parser.skipChildren();
}
} else if (token.equals(Token.START_OBJECT)) {
switch (fieldName) {
case WriteableIngestDocument.DOC_FIELD:
result = new SimulateDocumentBaseResult(
WriteableIngestDocument.INGEST_DOC_PARSER.apply(parser, null).getIngestDocument()
);
break;
case "error":
result = new SimulateDocumentBaseResult(ElasticsearchException.fromXContent(parser));
break;
default:
parser.skipChildren();
break;
}
                } // else it is a scalar value, skip it
}
assert result != null;
return result;
},
new ParseField(Fields.DOCUMENTS));
}
public SimulatePipelineResponse() {
}
@ -98,6 +166,10 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXConte
return builder;
}
public static SimulatePipelineResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
static final class Fields {
static final String DOCUMENTS = "docs";
}
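
A small sketch of the verbose auto-detection above, with the same parser setup as the earlier sketches: the response body carries no pipeline id, so it parses back as null, and verbose is inferred from the shape of the first "docs" entry. The isVerbose() accessor is assumed from the existing class.

String json = "{\"docs\":[{\"processor_results\":[]}]}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    SimulatePipelineResponse response = SimulatePipelineResponse.fromXContent(parser);
    // the first entry held "processor_results", so the response is verbose
    assert response.isVerbose();
}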

View File

@ -41,7 +41,7 @@ public class SimulatePipelineTransportAction extends HandledTransportAction<Simu
@Inject
public SimulatePipelineTransportAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, NodeService nodeService) {
super(settings, SimulatePipelineAction.NAME, threadPool, transportService, actionFilters,
super(settings, SimulatePipelineAction.NAME, transportService, actionFilters,
(Writeable.Reader<SimulatePipelineRequest>) SimulatePipelineRequest::new);
this.pipelineStore = nodeService.getIngestService().getPipelineStore();
this.executionService = new SimulateExecutionService(threadPool);

View File

@ -19,33 +19,91 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.ingest.ConfigurationUtils;
import org.elasticsearch.ingest.IngestDocument;
import java.io.IOException;
class SimulateProcessorResult implements Writeable, ToXContentObject {
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
public class SimulateProcessorResult implements Writeable, ToXContentObject {
private static final String IGNORED_ERROR_FIELD = "ignored_error";
private final String processorTag;
private final WriteableIngestDocument ingestDocument;
private final Exception failure;
SimulateProcessorResult(String processorTag, IngestDocument ingestDocument, Exception failure) {
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<ElasticsearchException, Void> IGNORED_ERROR_PARSER =
new ConstructingObjectParser<>(
"ignored_error_parser",
true,
a -> (ElasticsearchException)a[0]
);
static {
IGNORED_ERROR_PARSER.declareObject(
constructorArg(),
(p, c) -> ElasticsearchException.fromXContent(p),
new ParseField("error")
);
}
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<SimulateProcessorResult, Void> PARSER =
new ConstructingObjectParser<>(
"simulate_processor_result",
true,
a -> {
String processorTag = a[0] == null ? null : (String)a[0];
IngestDocument document = a[1] == null ? null : ((WriteableIngestDocument)a[1]).getIngestDocument();
Exception failure = null;
if (a[2] != null) {
failure = (ElasticsearchException)a[2];
} else if (a[3] != null) {
failure = (ElasticsearchException)a[3];
}
return new SimulateProcessorResult(processorTag, document, failure);
}
);
static {
PARSER.declareString(optionalConstructorArg(), new ParseField(ConfigurationUtils.TAG_KEY));
PARSER.declareObject(
optionalConstructorArg(),
WriteableIngestDocument.INGEST_DOC_PARSER,
new ParseField(WriteableIngestDocument.DOC_FIELD)
);
PARSER.declareObject(
optionalConstructorArg(),
IGNORED_ERROR_PARSER,
new ParseField(IGNORED_ERROR_FIELD)
);
PARSER.declareObject(
optionalConstructorArg(),
(p, c) -> ElasticsearchException.fromXContent(p),
new ParseField("error")
);
}
public SimulateProcessorResult(String processorTag, IngestDocument ingestDocument, Exception failure) {
this.processorTag = processorTag;
this.ingestDocument = (ingestDocument == null) ? null : new WriteableIngestDocument(ingestDocument);
this.failure = failure;
}
SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) {
public SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) {
this(processorTag, ingestDocument, null);
}
SimulateProcessorResult(String processorTag, Exception failure) {
public SimulateProcessorResult(String processorTag, Exception failure) {
this(processorTag, null, failure);
}
@ -98,7 +156,7 @@ class SimulateProcessorResult implements Writeable, ToXContentObject {
}
if (failure != null && ingestDocument != null) {
builder.startObject("ignored_error");
builder.startObject(IGNORED_ERROR_FIELD);
ElasticsearchException.generateFailureXContent(builder, params, failure, true);
builder.endObject();
} else if (failure != null) {
@ -112,4 +170,8 @@ class SimulateProcessorResult implements Writeable, ToXContentObject {
builder.endObject();
return builder;
}
public static SimulateProcessorResult fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
}
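
A sketch of the ignored_error shape handled above, again with the earlier parser setup; the error object follows the generic type/reason layout that ElasticsearchException.fromXContent expects.

String json = "{\"tag\":\"rename-1\",\"doc\":{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"1\","
    + "\"_source\":{},\"_ingest\":{\"timestamp\":\"2018-06-22T10:00:35Z\"}},"
    + "\"ignored_error\":{\"error\":{\"type\":\"illegal_argument_exception\",\"reason\":\"boom\"}}}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    SimulateProcessorResult result = SimulateProcessorResult.fromXContent(parser);
    // both the document and the (ignored) failure survive the round trip
    assert result.getIngestDocument() != null;
    assert result.getFailure() != null;
}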

View File

@ -20,24 +20,91 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.ingest.IngestDocument.MetaData;
import java.io.IOException;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
final class WriteableIngestDocument implements Writeable, ToXContentFragment {
static final String SOURCE_FIELD = "_source";
static final String INGEST_FIELD = "_ingest";
static final String DOC_FIELD = "doc";
private final IngestDocument ingestDocument;
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<WriteableIngestDocument, Void> INGEST_DOC_PARSER =
new ConstructingObjectParser<>(
"ingest_document",
true,
a -> {
HashMap<String, Object> sourceAndMetadata = new HashMap<>();
sourceAndMetadata.put(MetaData.INDEX.getFieldName(), a[0]);
sourceAndMetadata.put(MetaData.TYPE.getFieldName(), a[1]);
sourceAndMetadata.put(MetaData.ID.getFieldName(), a[2]);
if (a[3] != null) {
sourceAndMetadata.put(MetaData.ROUTING.getFieldName(), a[3]);
}
if (a[4] != null) {
sourceAndMetadata.put(MetaData.VERSION.getFieldName(), a[4]);
}
if (a[5] != null) {
sourceAndMetadata.put(MetaData.VERSION_TYPE.getFieldName(), a[5]);
}
sourceAndMetadata.putAll((Map<String, Object>)a[6]);
return new WriteableIngestDocument(new IngestDocument(sourceAndMetadata, (Map<String, Object>)a[7]));
}
);
static {
INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(MetaData.INDEX.getFieldName()));
INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(MetaData.TYPE.getFieldName()));
INGEST_DOC_PARSER.declareString(constructorArg(), new ParseField(MetaData.ID.getFieldName()));
INGEST_DOC_PARSER.declareString(optionalConstructorArg(), new ParseField(MetaData.ROUTING.getFieldName()));
INGEST_DOC_PARSER.declareLong(optionalConstructorArg(), new ParseField(MetaData.VERSION.getFieldName()));
INGEST_DOC_PARSER.declareString(optionalConstructorArg(), new ParseField(MetaData.VERSION_TYPE.getFieldName()));
INGEST_DOC_PARSER.declareObject(constructorArg(), (p, c) -> p.map(), new ParseField(SOURCE_FIELD));
INGEST_DOC_PARSER.declareObject(
constructorArg(),
(p, c) -> {
Map<String, Object> ingestMap = p.map();
ingestMap.computeIfPresent(
"timestamp",
(k, o) -> ZonedDateTime.parse((String)o)
);
return ingestMap;
},
new ParseField(INGEST_FIELD)
);
}
@SuppressWarnings("unchecked")
public static final ConstructingObjectParser<WriteableIngestDocument, Void> PARSER =
new ConstructingObjectParser<>(
"writeable_ingest_document",
true,
a -> (WriteableIngestDocument)a[0]
);
static {
PARSER.declareObject(constructorArg(), INGEST_DOC_PARSER, new ParseField(DOC_FIELD));
}
WriteableIngestDocument(IngestDocument ingestDocument) {
assert ingestDocument != null;
this.ingestDocument = ingestDocument;
@ -67,19 +134,25 @@ final class WriteableIngestDocument implements Writeable, ToXContentFragment {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("doc");
Map<IngestDocument.MetaData, Object> metadataMap = ingestDocument.extractMetadata();
builder.startObject(DOC_FIELD);
Map<IngestDocument.MetaData, Object> metadataMap = ingestDocument.getMetadata();
for (Map.Entry<IngestDocument.MetaData, Object> metadata : metadataMap.entrySet()) {
if (metadata.getValue() != null) {
builder.field(metadata.getKey().getFieldName(), metadata.getValue().toString());
}
}
builder.field("_source", ingestDocument.getSourceAndMetadata());
builder.field("_ingest", ingestDocument.getIngestMetadata());
Map<String, Object> source = IngestDocument.deepCopyMap(ingestDocument.getSourceAndMetadata());
metadataMap.keySet().forEach(mD -> source.remove(mD.getFieldName()));
builder.field(SOURCE_FIELD, source);
builder.field(INGEST_FIELD, ingestDocument.getIngestMetadata());
builder.endObject();
return builder;
}
public static WriteableIngestDocument fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
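
A same-package sketch (the constructor is package-private) of the shape toXContent now produces: metadata keys at the top level of "doc", a metadata-free "_source", and "_ingest" carrying the timestamp.

import org.elasticsearch.ingest.IngestDocument;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.HashMap;
import java.util.Map;

Map<String, Object> sourceAndMetadata = new HashMap<>();
sourceAndMetadata.put("_index", "index");
sourceAndMetadata.put("_type", "_doc");
sourceAndMetadata.put("_id", "1");
sourceAndMetadata.put("foo", "bar");
Map<String, Object> ingestMetadata = new HashMap<>();
ingestMetadata.put("timestamp", ZonedDateTime.now(ZoneOffset.UTC));
WriteableIngestDocument doc = new WriteableIngestDocument(new IngestDocument(sourceAndMetadata, ingestMetadata));
// renders as {"doc":{"_index":"index","_type":"_doc","_id":"1",
//             "_source":{"foo":"bar"},"_ingest":{"timestamp":"..."}}}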
@Override
public boolean equals(Object o) {
if (this == o) {

View File

@ -30,7 +30,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
public class TransportMainAction extends HandledTransportAction<MainRequest, MainResponse> {
@ -38,9 +37,9 @@ public class TransportMainAction extends HandledTransportAction<MainRequest, Mai
private final ClusterService clusterService;
@Inject
public TransportMainAction(Settings settings, ThreadPool threadPool, TransportService transportService,
public TransportMainAction(Settings settings, TransportService transportService,
ActionFilters actionFilters, ClusterService clusterService) {
super(settings, MainAction.NAME, threadPool, transportService, actionFilters, MainRequest::new);
super(settings, MainAction.NAME, transportService, actionFilters, MainRequest::new);
this.clusterService = clusterService;
}

View File

@ -25,7 +25,6 @@ import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {
@ -34,10 +33,10 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
private final SearchTransportService searchTransportService;
@Inject
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
public TransportClearScrollAction(Settings settings, TransportService transportService,
ClusterService clusterService, ActionFilters actionFilters,
SearchTransportService searchTransportService) {
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters,
super(settings, ClearScrollAction.NAME, transportService, actionFilters,
ClearScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = searchTransportService;

View File

@ -42,6 +42,7 @@ import java.util.function.LongSupplier;
public class TransportMultiSearchAction extends HandledTransportAction<MultiSearchRequest, MultiSearchResponse> {
private final int availableProcessors;
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final LongSupplier relativeTimeProvider;
private final NodeClient client;
@ -49,7 +50,8 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
@Inject
public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ClusterService clusterService, ActionFilters actionFilters, NodeClient client) {
super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, MultiSearchRequest::new);
super(settings, MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.availableProcessors = EsExecutors.numberOfProcessors(settings);
this.relativeTimeProvider = System::nanoTime;
@ -59,7 +61,8 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
TransportMultiSearchAction(ThreadPool threadPool, ActionFilters actionFilters, TransportService transportService,
ClusterService clusterService, int availableProcessors,
LongSupplier relativeTimeProvider, NodeClient client) {
super(Settings.EMPTY, MultiSearchAction.NAME, threadPool, transportService, actionFilters, MultiSearchRequest::new);
super(Settings.EMPTY, MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.availableProcessors = availableProcessors;
this.relativeTimeProvider = relativeTimeProvider;

View File

@ -70,6 +70,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting(
"action.search.shard_count.limit", Long.MAX_VALUE, 1L, Property.Dynamic, Property.NodeScope);
private final ThreadPool threadPool;
private final ClusterService clusterService;
private final SearchTransportService searchTransportService;
private final RemoteClusterService remoteClusterService;
@ -82,8 +83,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController,
ClusterService clusterService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters,
(Writeable.Reader<SearchRequest>) SearchRequest::new);
super(settings, SearchAction.NAME, transportService, actionFilters, (Writeable.Reader<SearchRequest>) SearchRequest::new);
this.threadPool = threadPool;
this.searchPhaseController = searchPhaseController;
this.searchTransportService = searchTransportService;
this.remoteClusterService = searchTransportService.getRemoteClusterService();

View File

@ -27,7 +27,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import static org.elasticsearch.action.search.ParsedScrollId.QUERY_AND_FETCH_TYPE;
@ -41,10 +40,10 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
private final SearchPhaseController searchPhaseController;
@Inject
public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
public TransportSearchScrollAction(Settings settings, TransportService transportService,
ClusterService clusterService, ActionFilters actionFilters,
SearchTransportService searchTransportService, SearchPhaseController searchPhaseController) {
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters,
super(settings, SearchScrollAction.NAME, transportService, actionFilters,
(Writeable.Reader<SearchScrollRequest>) SearchScrollRequest::new);
this.clusterService = clusterService;
this.searchTransportService = searchTransportService;

View File

@ -37,29 +37,27 @@ import java.util.function.Supplier;
*/
public abstract class HandledTransportAction<Request extends ActionRequest, Response extends ActionResponse>
extends TransportAction<Request, Response> {
protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters,
Supplier<Request> request) {
this(settings, actionName, true, threadPool, transportService, actionFilters, request);
protected HandledTransportAction(Settings settings, String actionName, TransportService transportService,
ActionFilters actionFilters, Supplier<Request> request) {
this(settings, actionName, true, transportService, actionFilters, request);
}
protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService,
protected HandledTransportAction(Settings settings, String actionName, TransportService transportService,
ActionFilters actionFilters, Writeable.Reader<Request> requestReader) {
this(settings, actionName, true, threadPool, transportService, actionFilters, requestReader);
this(settings, actionName, true, transportService, actionFilters, requestReader);
}
protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
Supplier<Request> request) {
super(settings, actionName, threadPool, actionFilters, transportService.getTaskManager());
protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker,
TransportService transportService, ActionFilters actionFilters, Supplier<Request> request) {
super(settings, actionName, actionFilters, transportService.getTaskManager());
transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, false, canTripCircuitBreaker,
new TransportHandler());
}
protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker, ThreadPool threadPool,
protected HandledTransportAction(Settings settings, String actionName, boolean canTripCircuitBreaker,
TransportService transportService, ActionFilters actionFilters,
Writeable.Reader<Request> requestReader) {
super(settings, actionName, threadPool, actionFilters, transportService.getTaskManager());
super(settings, actionName, actionFilters, transportService.getTaskManager());
transportService.registerRequestHandler(actionName, ThreadPool.Names.SAME, false, canTripCircuitBreaker, requestReader,
new TransportHandler());
}
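
A hypothetical subclass sketch of the migrated constructor shape; MyAction, MyRequest, and MyResponse are illustrative placeholders, not real classes. The base class no longer threads a ThreadPool through, so actions that still schedule work keep their own reference, as the concrete actions above do.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class TransportMyAction extends HandledTransportAction<MyRequest, MyResponse> {
    private final ThreadPool threadPool; // retained locally only where still needed

    @Inject
    public TransportMyAction(Settings settings, ThreadPool threadPool,
                             TransportService transportService, ActionFilters actionFilters) {
        super(settings, MyAction.NAME, transportService, actionFilters, MyRequest::new);
        this.threadPool = threadPool;
    }

    @Override
    protected void doExecute(MyRequest request, ActionListener<MyResponse> listener) {
        listener.onResponse(new MyResponse()); // placeholder body
    }
}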

View File

@ -29,21 +29,17 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.concurrent.atomic.AtomicInteger;
public abstract class TransportAction<Request extends ActionRequest, Response extends ActionResponse> extends AbstractComponent {
protected final ThreadPool threadPool;
protected final String actionName;
private final ActionFilter[] filters;
protected final TaskManager taskManager;
protected TransportAction(Settings settings, String actionName, ThreadPool threadPool, ActionFilters actionFilters,
TaskManager taskManager) {
protected TransportAction(Settings settings, String actionName, ActionFilters actionFilters, TaskManager taskManager) {
super(settings);
this.threadPool = threadPool;
this.actionName = actionName;
this.filters = actionFilters.filters();
this.taskManager = taskManager;

View File

@ -58,10 +58,10 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
final String transportShardAction;
protected TransportBroadcastAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
protected TransportBroadcastAction(Settings settings, String actionName, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<Request> request, Supplier<ShardRequest> shardRequest, String shardExecutor) {
super(settings, actionName, threadPool, transportService, actionFilters, request);
super(settings, actionName, transportService, actionFilters, request);
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;

View File

@ -88,21 +88,18 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
public TransportBroadcastByNodeAction(
Settings settings,
String actionName,
ThreadPool threadPool,
ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<Request> request,
String executor) {
this(settings, actionName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, request,
executor, true);
this(settings, actionName, clusterService, transportService, actionFilters, indexNameExpressionResolver, request, executor, true);
}
public TransportBroadcastByNodeAction(
Settings settings,
String actionName,
ThreadPool threadPool,
ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters,
@ -110,8 +107,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
Supplier<Request> request,
String executor,
boolean canTripCircuitBreaker) {
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters,
request);
super(settings, actionName, canTripCircuitBreaker, transportService, actionFilters, request);
this.clusterService = clusterService;
this.transportService = transportService;

View File

@ -54,6 +54,7 @@ import java.util.function.Supplier;
* A base class for operations that needs to be performed on the master node.
*/
public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest<Request>, Response extends ActionResponse> extends HandledTransportAction<Request, Response> {
protected final ThreadPool threadPool;
protected final TransportService transportService;
protected final ClusterService clusterService;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
@ -75,10 +76,10 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
protected TransportMasterNodeAction(Settings settings, String actionName, boolean canTripCircuitBreaker,
TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters,
request);
super(settings, actionName, canTripCircuitBreaker, transportService, actionFilters, request);
this.transportService = transportService;
this.clusterService = clusterService;
this.threadPool = threadPool;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.executor = executor();
}
@ -87,10 +88,10 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
ActionFilters actionFilters, Writeable.Reader<Request> request,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, actionName, canTripCircuitBreaker, threadPool, transportService, actionFilters, request
);
super(settings, actionName, canTripCircuitBreaker, transportService, actionFilters, request);
this.transportService = transportService;
this.clusterService = clusterService;
this.threadPool = threadPool;
this.indexNameExpressionResolver = indexNameExpressionResolver;
this.executor = executor();
}

View File

@ -54,6 +54,7 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
NodeResponse extends BaseNodeResponse>
extends HandledTransportAction<NodesRequest, NodesResponse> {
protected final ThreadPool threadPool;
protected final ClusterService clusterService;
protected final TransportService transportService;
protected final Class<NodeResponse> nodeResponseClass;
@ -64,7 +65,8 @@ public abstract class TransportNodesAction<NodesRequest extends BaseNodesRequest
ClusterService clusterService, TransportService transportService, ActionFilters actionFilters,
Supplier<NodesRequest> request, Supplier<NodeRequest> nodeRequest, String nodeExecutor,
Class<NodeResponse> nodeResponseClass) {
super(settings, actionName, threadPool, transportService, actionFilters, request);
super(settings, actionName, transportService, actionFilters, request);
this.threadPool = threadPool;
this.clusterService = Objects.requireNonNull(clusterService);
this.transportService = Objects.requireNonNull(transportService);
this.nodeResponseClass = Objects.requireNonNull(nodeResponseClass);

View File

@ -38,7 +38,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
@ -58,10 +57,10 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
private final ClusterService clusterService;
private final IndexNameExpressionResolver indexNameExpressionResolver;
public TransportBroadcastReplicationAction(String name, Supplier<Request> request, Settings settings, ThreadPool threadPool, ClusterService clusterService,
public TransportBroadcastReplicationAction(String name, Supplier<Request> request, Settings settings, ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) {
super(settings, name, threadPool, transportService, actionFilters, request);
super(settings, name, transportService, actionFilters, request);
this.replicatedBroadcastShardAction = replicatedBroadcastShardAction;
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;

View File

@ -100,6 +100,7 @@ public abstract class TransportReplicationAction<
Response extends ReplicationResponse
> extends TransportAction<Request, Response> {
protected final ThreadPool threadPool;
protected final TransportService transportService;
protected final ClusterService clusterService;
protected final ShardStateAction shardStateAction;
@ -132,7 +133,8 @@ public abstract class TransportReplicationAction<
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request,
Supplier<ReplicaRequest> replicaRequest, String executor,
boolean syncGlobalCheckpointAfterOperation) {
super(settings, actionName, threadPool, actionFilters, transportService.getTaskManager());
super(settings, actionName, actionFilters, transportService.getTaskManager());
this.threadPool = threadPool;
this.transportService = transportService;
this.clusterService = clusterService;
this.indicesService = indicesService;

View File

@ -50,6 +50,8 @@ import java.util.function.Supplier;
public abstract class TransportInstanceSingleOperationAction<Request extends InstanceShardOperationRequest<Request>, Response extends ActionResponse>
extends HandledTransportAction<Request, Response> {
protected final ThreadPool threadPool;
protected final ClusterService clusterService;
protected final TransportService transportService;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
@ -60,7 +62,8 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
protected TransportInstanceSingleOperationAction(Settings settings, String actionName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request) {
super(settings, actionName, threadPool, transportService, actionFilters, request);
super(settings, actionName, transportService, actionFilters, request);
this.threadPool = threadPool;
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;

View File

@ -60,6 +60,7 @@ import static org.elasticsearch.action.support.TransportActions.isShardNotAvaila
*/
public abstract class TransportSingleShardAction<Request extends SingleShardRequest<Request>, Response extends ActionResponse> extends TransportAction<Request, Response> {
protected final ThreadPool threadPool;
protected final ClusterService clusterService;
protected final TransportService transportService;
protected final IndexNameExpressionResolver indexNameExpressionResolver;
@ -70,7 +71,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
protected TransportSingleShardAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<Request> request, String executor) {
super(settings, actionName, threadPool, actionFilters, transportService.getTaskManager());
super(settings, actionName, actionFilters, transportService.getTaskManager());
this.threadPool = threadPool;
this.clusterService = clusterService;
this.transportService = transportService;
this.indexNameExpressionResolver = indexNameExpressionResolver;

View File

@ -77,10 +77,10 @@ public abstract class TransportTasksAction<
protected final String transportNodeAction;
protected TransportTasksAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService,
protected TransportTasksAction(Settings settings, String actionName, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, Supplier<TasksRequest> requestSupplier,
Supplier<TasksResponse> responseSupplier, String nodeExecutor) {
super(settings, actionName, threadPool, transportService, actionFilters, requestSupplier);
super(settings, actionName, transportService, actionFilters, requestSupplier);
this.clusterService = clusterService;
this.transportService = transportService;
this.transportNodeAction = actionName + "[n]";

View File

@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.HashMap;
@ -45,10 +44,10 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction<Mult
private final IndexNameExpressionResolver indexNameExpressionResolver;
@Inject
public TransportMultiTermVectorsAction(Settings settings, ThreadPool threadPool, TransportService transportService,
public TransportMultiTermVectorsAction(Settings settings, TransportService transportService,
ClusterService clusterService, TransportShardMultiTermsVectorAction shardAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, MultiTermVectorsAction.NAME, threadPool, transportService, actionFilters, MultiTermVectorsRequest::new);
super(settings, MultiTermVectorsAction.NAME, transportService, actionFilters, MultiTermVectorsRequest::new);
this.clusterService = clusterService;
this.shardAction = shardAction;
this.indexNameExpressionResolver = indexNameExpressionResolver;

View File

@ -846,7 +846,7 @@ public class NumberFieldMapper extends FieldMapper {
public static final class NumberFieldType extends SimpleMappedFieldType {
NumberType type;
private final NumberType type;
public NumberFieldType(NumberType type) {
super();
@ -856,7 +856,7 @@ public class NumberFieldMapper extends FieldMapper {
setOmitNorms(true);
}
NumberFieldType(NumberFieldType other) {
private NumberFieldType(NumberFieldType other) {
super(other);
this.type = other.type;
}
@ -936,6 +936,20 @@ public class NumberFieldMapper extends FieldMapper {
return new DocValueFormat.Decimal(format);
}
}
@Override
public boolean equals(Object o) {
if (super.equals(o) == false) {
return false;
}
NumberFieldType that = (NumberFieldType) o;
return type == that.type;
}
@Override
public int hashCode() {
return Objects.hash(super.hashCode(), type);
}
}
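
A small sketch of what the new equals contract buys: fresh field types with identical defaults but different numeric types no longer compare equal, so mapping-compatibility checks can distinguish them.

NumberFieldMapper.NumberFieldType longType =
    new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG);
NumberFieldMapper.NumberFieldType integerType =
    new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.INTEGER);
assert longType.equals(integerType) == false; // differs only in the numeric type
assert longType.equals(new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG));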
private Explicit<Boolean> ignoreMalformed;

View File

@ -449,13 +449,13 @@ public class RecoverySourceHandler {
}
}
void prepareTargetForTranslog(final boolean createNewTranslog, final int totalTranslogOps) throws IOException {
void prepareTargetForTranslog(final boolean fileBasedRecovery, final int totalTranslogOps) throws IOException {
StopWatch stopWatch = new StopWatch().start();
logger.trace("recovery [phase1]: prepare remote engine for translog");
final long startEngineStart = stopWatch.totalTime().millis();
// Send a request preparing the new shard's translog to receive operations. This ensures the shard engine is started and disables
// garbage collection (not the JVM's GC!) of tombstone deletes.
cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(createNewTranslog, totalTranslogOps));
cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(fileBasedRecovery, totalTranslogOps));
stopWatch.stop();
response.startTime = stopWatch.totalTime().millis() - startEngineStart;

View File

@ -570,6 +570,17 @@ public final class IngestDocument {
return metadataMap;
}
/**
 * Does the same thing as {@link #extractMetadata} but does not remove the metadata fields from the source map.
*/
public Map<MetaData, Object> getMetadata() {
Map<MetaData, Object> metadataMap = new EnumMap<>(MetaData.class);
for (MetaData metaData : MetaData.values()) {
metadataMap.put(metaData, sourceAndMetadata.get(metaData.getFieldName()));
}
return metadataMap;
}
/**
* Returns the available ingest metadata fields, by default only timestamp, but it is possible to set additional ones.
* Use only for reading values, modify them instead using {@link #setFieldValue(String, Object)} and {@link #removeField(String)}
@ -588,7 +599,7 @@ public final class IngestDocument {
}
@SuppressWarnings("unchecked")
private static <K, V> Map<K, V> deepCopyMap(Map<K, V> source) {
public static <K, V> Map<K, V> deepCopyMap(Map<K, V> source) {
return (Map<K, V>) deepCopy(source);
}
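
A short sketch contrasting the two accessors, using the two-map IngestDocument constructor that the parser above relies on:

Map<String, Object> sourceAndMetadata = new HashMap<>();
sourceAndMetadata.put("_index", "index");
sourceAndMetadata.put("_type", "_doc");
sourceAndMetadata.put("_id", "1");
sourceAndMetadata.put("foo", "bar");
IngestDocument document = new IngestDocument(sourceAndMetadata, new HashMap<>());
document.getMetadata(); // reads the metadata; "_index" etc. stay in the source map
assert document.getSourceAndMetadata().containsKey("_index");
document.extractMetadata(); // removes the metadata fields as a side effect
assert document.getSourceAndMetadata().containsKey("_index") == false;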

View File

@ -32,7 +32,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.TypeMissingException;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
@ -89,14 +88,9 @@ public class RestGetMappingAction extends BaseRestHandler {
@Override
public RestResponse buildResponse(final GetMappingsResponse response, final XContentBuilder builder) throws Exception {
final ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappingsByIndex = response.getMappings();
if (mappingsByIndex.isEmpty() && (indices.length != 0 || types.length != 0)) {
if (indices.length != 0 && types.length == 0) {
builder.close();
return new BytesRestResponse(channel, new IndexNotFoundException(String.join(",", indices)));
} else {
builder.close();
return new BytesRestResponse(channel, new TypeMissingException("_all", String.join(",", types)));
}
if (mappingsByIndex.isEmpty() && types.length != 0) {
builder.close();
return new BytesRestResponse(channel, new TypeMissingException("_all", String.join(",", types)));
}
final Set<String> typeNames = new HashSet<>();

View File

@ -79,9 +79,8 @@ public class ActionModuleTests extends ESTestCase {
}
}
class FakeTransportAction extends TransportAction<FakeRequest, ActionResponse> {
protected FakeTransportAction(Settings settings, String actionName, ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, TaskManager taskManager) {
super(settings, actionName, threadPool, actionFilters, taskManager);
protected FakeTransportAction(Settings settings, String actionName, ActionFilters actionFilters, TaskManager taskManager) {
super(settings, actionName, actionFilters, taskManager);
}
@Override

View File

@ -192,9 +192,8 @@ public abstract class TaskManagerTestCase extends ESTestCase {
clusterService = createClusterService(threadPool, discoveryNode.get());
clusterService.addStateApplier(transportService.getTaskManager());
ActionFilters actionFilters = new ActionFilters(emptySet());
transportListTasksAction = new TransportListTasksAction(settings, threadPool, clusterService, transportService, actionFilters);
transportCancelTasksAction = new TransportCancelTasksAction(settings, threadPool, clusterService,
transportService, actionFilters);
transportListTasksAction = new TransportListTasksAction(settings, clusterService, transportService, actionFilters);
transportCancelTasksAction = new TransportCancelTasksAction(settings, clusterService, transportService, actionFilters);
transportService.acceptIncomingRequests();
}

View File

@ -424,12 +424,9 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin {
UnblockTestTasksResponse, UnblockTestTaskResponse> {
@Inject
public TransportUnblockTestTasksAction(Settings settings,ThreadPool threadPool, ClusterService
clusterService,
TransportService transportService) {
super(settings, UnblockTestTasksAction.NAME, threadPool, clusterService, transportService, new ActionFilters(new
HashSet<>()),
UnblockTestTasksRequest::new, UnblockTestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
public TransportUnblockTestTasksAction(Settings settings, ClusterService clusterService, TransportService transportService) {
super(settings, UnblockTestTasksAction.NAME, clusterService, transportService, new ActionFilters(new HashSet<>()),
UnblockTestTasksRequest::new, UnblockTestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
}
@Override

View File

@ -254,9 +254,9 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
*/
abstract static class TestTasksAction extends TransportTasksAction<Task, TestTasksRequest, TestTasksResponse, TestTaskResponse> {
protected TestTasksAction(Settings settings, String actionName, ThreadPool threadPool,
protected TestTasksAction(Settings settings, String actionName,
ClusterService clusterService, TransportService transportService) {
super(settings, actionName, threadPool, clusterService, transportService, new ActionFilters(new HashSet<>()),
super(settings, actionName, clusterService, transportService, new ActionFilters(new HashSet<>()),
TestTasksRequest::new, TestTasksResponse::new,
ThreadPool.Names.MANAGEMENT);
}
@ -622,7 +622,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
for (int i = 0; i < testNodes.length; i++) {
final int node = i;
// Simulate task action that fails on one of the tasks on one of the nodes
tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "testTasksAction", threadPool, testNodes[i].clusterService,
tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "testTasksAction", testNodes[i].clusterService,
testNodes[i].transportService) {
@Override
protected void taskOperation(TestTasksRequest request, Task task, ActionListener<TestTaskResponse> listener) {
@ -701,7 +701,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
final int node = i;
// Simulate a task action that works on all nodes except nodes listed in filterNodes.
// We are testing that it works.
tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "testTasksAction", threadPool,
tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "testTasksAction",
testNodes[i].clusterService, testNodes[i].transportService) {
@Override

View File

@ -0,0 +1,138 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.StringJoiner;
import java.util.function.Predicate;
import java.util.function.Supplier;
import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.elasticsearch.action.ingest.WriteableIngestDocumentTests.createRandomIngestDoc;
public class SimulateDocumentBaseResultTests extends AbstractXContentTestCase<SimulateDocumentBaseResult> {
public void testSerialization() throws IOException {
boolean isFailure = randomBoolean();
SimulateDocumentBaseResult simulateDocumentBaseResult = createTestInstance(isFailure);
BytesStreamOutput out = new BytesStreamOutput();
simulateDocumentBaseResult.writeTo(out);
StreamInput streamInput = out.bytes().streamInput();
SimulateDocumentBaseResult otherSimulateDocumentBaseResult = new SimulateDocumentBaseResult(streamInput);
if (isFailure) {
assertThat(otherSimulateDocumentBaseResult.getIngestDocument(), equalTo(simulateDocumentBaseResult.getIngestDocument()));
assertThat(otherSimulateDocumentBaseResult.getFailure(), instanceOf(IllegalArgumentException.class));
IllegalArgumentException e = (IllegalArgumentException) otherSimulateDocumentBaseResult.getFailure();
assertThat(e.getMessage(), equalTo("test"));
} else {
assertIngestDocument(otherSimulateDocumentBaseResult.getIngestDocument(), simulateDocumentBaseResult.getIngestDocument());
}
}
static SimulateDocumentBaseResult createTestInstance(boolean isFailure) {
SimulateDocumentBaseResult simulateDocumentBaseResult;
if (isFailure) {
simulateDocumentBaseResult = new SimulateDocumentBaseResult(new IllegalArgumentException("test"));
} else {
IngestDocument ingestDocument = createRandomIngestDoc();
simulateDocumentBaseResult = new SimulateDocumentBaseResult(ingestDocument);
}
return simulateDocumentBaseResult;
}
private static SimulateDocumentBaseResult createTestInstanceWithFailures() {
return createTestInstance(randomBoolean());
}
@Override
protected SimulateDocumentBaseResult createTestInstance() {
return createTestInstance(false);
}
@Override
protected SimulateDocumentBaseResult doParseInstance(XContentParser parser) {
return SimulateDocumentBaseResult.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
        // We cannot have random fields in the _source and _ingest fields
return field ->
field.contains(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.SOURCE_FIELD).toString()
) ||
field.contains(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.INGEST_FIELD).toString()
);
}
public static void assertEqualDocs(SimulateDocumentBaseResult response, SimulateDocumentBaseResult parsedResponse) {
assertEquals(response.getIngestDocument(), parsedResponse.getIngestDocument());
if (response.getFailure() != null) {
assertNotNull(parsedResponse.getFailure());
assertThat(
parsedResponse.getFailure().getMessage(),
containsString(response.getFailure().getMessage())
);
} else {
assertNull(parsedResponse.getFailure());
}
}
@Override
public void assertEqualInstances(SimulateDocumentBaseResult response, SimulateDocumentBaseResult parsedResponse) {
assertEqualDocs(response, parsedResponse);
}
/**
     * Test parsing {@link SimulateDocumentBaseResult} with inner failures, which don't support asserting on xcontent
     * equivalence because exceptions are not parsed back as the same original class. We run the usual
* {@link AbstractXContentTestCase#testFromXContent()} without failures, and this other test with failures where
* we disable asserting on xcontent equivalence at the end.
*/
public void testFromXContentWithFailures() throws IOException {
Supplier<SimulateDocumentBaseResult> instanceSupplier = SimulateDocumentBaseResultTests::createTestInstanceWithFailures;
//exceptions are not of the same type whenever parsed back
boolean assertToXContentEquivalence = false;
AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields(),
getShuffleFieldsExceptions(), getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance,
this::assertEqualInstances, assertToXContentEquivalence, getToXContentParams());
}
}

View File

@ -1,60 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
public class SimulateDocumentSimpleResultTests extends ESTestCase {
public void testSerialization() throws IOException {
boolean isFailure = randomBoolean();
SimulateDocumentBaseResult simulateDocumentBaseResult;
if (isFailure) {
simulateDocumentBaseResult = new SimulateDocumentBaseResult(new IllegalArgumentException("test"));
} else {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
simulateDocumentBaseResult = new SimulateDocumentBaseResult(ingestDocument);
}
BytesStreamOutput out = new BytesStreamOutput();
simulateDocumentBaseResult.writeTo(out);
StreamInput streamInput = out.bytes().streamInput();
SimulateDocumentBaseResult otherSimulateDocumentBaseResult = new SimulateDocumentBaseResult(streamInput);
if (isFailure) {
assertThat(otherSimulateDocumentBaseResult.getIngestDocument(), equalTo(simulateDocumentBaseResult.getIngestDocument()));
assertThat(otherSimulateDocumentBaseResult.getFailure(), instanceOf(IllegalArgumentException.class));
IllegalArgumentException e = (IllegalArgumentException) otherSimulateDocumentBaseResult.getFailure();
assertThat(e.getMessage(), equalTo("test"));
} else {
assertIngestDocument(otherSimulateDocumentBaseResult.getIngestDocument(), simulateDocumentBaseResult.getIngestDocument());
}
}
}

View File

@ -0,0 +1,113 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.ingest;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.StringJoiner;
import java.util.function.Predicate;
import java.util.function.Supplier;
public class SimulateDocumentVerboseResultTests extends AbstractXContentTestCase<SimulateDocumentVerboseResult> {
static SimulateDocumentVerboseResult createTestInstance(boolean withFailures) {
int numDocs = randomIntBetween(0, 10);
List<SimulateProcessorResult> results = new ArrayList<>();
for (int i = 0; i < numDocs; i++) {
boolean isSuccessful = !(withFailures && randomBoolean());
boolean isIgnoredError = withFailures && randomBoolean();
results.add(
SimulateProcessorResultTests
.createTestInstance(isSuccessful, isIgnoredError)
);
}
return new SimulateDocumentVerboseResult(results);
}
private static SimulateDocumentVerboseResult createTestInstanceWithFailures() {
return createTestInstance(true);
}
@Override
protected SimulateDocumentVerboseResult createTestInstance() {
return createTestInstance(false);
}
@Override
protected SimulateDocumentVerboseResult doParseInstance(XContentParser parser) {
return SimulateDocumentVerboseResult.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
static void assertEqualDocs(SimulateDocumentVerboseResult response,
SimulateDocumentVerboseResult parsedResponse) {
assertEquals(response.getProcessorResults().size(), parsedResponse.getProcessorResults().size());
for (int i = 0; i < response.getProcessorResults().size(); i++) {
SimulateProcessorResultTests.assertEqualProcessorResults(
response.getProcessorResults().get(i),
parsedResponse.getProcessorResults().get(i)
);
}
}
@Override
protected void assertEqualInstances(SimulateDocumentVerboseResult response,
SimulateDocumentVerboseResult parsedResponse) {
assertEqualDocs(response, parsedResponse);
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
// We cannot have random fields in the _source field and _ingest field
return field ->
field.contains(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.SOURCE_FIELD).toString()
) ||
field.contains(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.INGEST_FIELD).toString()
);
}
/**
* Test parsing {@link SimulateDocumentVerboseResult} with inner failures as they don't support asserting on xcontent
* equivalence, given that exceptions are not parsed back as the same original class. We run the usual
* {@link AbstractXContentTestCase#testFromXContent()} without failures, and this other test with failures where we
* disable asserting on xcontent equivalence at the end.
*/
public void testFromXContentWithFailures() throws IOException {
Supplier<SimulateDocumentVerboseResult> instanceSupplier = SimulateDocumentVerboseResultTests::createTestInstanceWithFailures;
// exceptions are not of the same type when parsed back
boolean assertToXContentEquivalence = false;
AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields(),
getShuffleFieldsExceptions(), getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance,
this::assertEqualInstances, assertToXContentEquivalence, getToXContentParams());
}
}

View File

@ -21,57 +21,29 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.StringJoiner;
import java.util.function.Predicate;
import java.util.function.Supplier;
import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.nullValue;
public class SimulatePipelineResponseTests extends ESTestCase {
public class SimulatePipelineResponseTests extends AbstractXContentTestCase<SimulatePipelineResponse> {
public void testSerialization() throws IOException {
boolean isVerbose = randomBoolean();
String id = randomBoolean() ? randomAlphaOfLengthBetween(1, 10) : null;
int numResults = randomIntBetween(1, 10);
List<SimulateDocumentResult> results = new ArrayList<>(numResults);
for (int i = 0; i < numResults; i++) {
boolean isFailure = randomBoolean();
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
if (isVerbose) {
int numProcessors = randomIntBetween(1, 10);
List<SimulateProcessorResult> processorResults = new ArrayList<>(numProcessors);
for (int j = 0; j < numProcessors; j++) {
String processorTag = randomAlphaOfLengthBetween(1, 10);
SimulateProcessorResult processorResult;
if (isFailure) {
processorResult = new SimulateProcessorResult(processorTag, new IllegalArgumentException("test"));
} else {
processorResult = new SimulateProcessorResult(processorTag, ingestDocument);
}
processorResults.add(processorResult);
}
results.add(new SimulateDocumentVerboseResult(processorResults));
} else {
results.add(new SimulateDocumentBaseResult(ingestDocument));
SimulateDocumentBaseResult simulateDocumentBaseResult;
if (isFailure) {
simulateDocumentBaseResult = new SimulateDocumentBaseResult(new IllegalArgumentException("test"));
} else {
simulateDocumentBaseResult = new SimulateDocumentBaseResult(ingestDocument);
}
results.add(simulateDocumentBaseResult);
}
}
SimulatePipelineResponse response = new SimulatePipelineResponse(id, isVerbose, results);
SimulatePipelineResponse response = createInstance(id, isVerbose, true);
BytesStreamOutput out = new BytesStreamOutput();
response.writeTo(out);
StreamInput streamInput = out.bytes().streamInput();
@ -120,4 +92,97 @@ public class SimulatePipelineResponseTests extends ESTestCase {
}
}
}
static SimulatePipelineResponse createInstance(String pipelineId, boolean isVerbose, boolean withFailure) {
int numResults = randomIntBetween(1, 10);
List<SimulateDocumentResult> results = new ArrayList<>(numResults);
for (int i = 0; i < numResults; i++) {
if (isVerbose) {
results.add(
SimulateDocumentVerboseResultTests.createTestInstance(withFailure)
);
} else {
results.add(
SimulateDocumentBaseResultTests.createTestInstance(withFailure && randomBoolean())
);
}
}
return new SimulatePipelineResponse(pipelineId, isVerbose, results);
}
private static SimulatePipelineResponse createTestInstanceWithFailures() {
boolean isVerbose = randomBoolean();
return createInstance(null, isVerbose, false);
}
@Override
protected SimulatePipelineResponse createTestInstance() {
boolean isVerbose = randomBoolean();
// since the pipeline id is not serialized with XContent we set it to null for equality tests.
// we test failures separately since comparing XContent is not possible with failures
return createInstance(null, isVerbose, false);
}
@Override
protected SimulatePipelineResponse doParseInstance(XContentParser parser) {
return SimulatePipelineResponse.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected void assertEqualInstances(SimulatePipelineResponse response,
SimulatePipelineResponse parsedResponse) {
assertEquals(response.getPipelineId(), parsedResponse.getPipelineId());
assertEquals(response.isVerbose(), parsedResponse.isVerbose());
assertEquals(response.getResults().size(), parsedResponse.getResults().size());
for (int i = 0; i < response.getResults().size(); i++) {
if (response.isVerbose()) {
assertThat(response.getResults().get(i), instanceOf(SimulateDocumentVerboseResult.class));
assertThat(parsedResponse.getResults().get(i), instanceOf(SimulateDocumentVerboseResult.class));
SimulateDocumentVerboseResult responseResult = (SimulateDocumentVerboseResult)response.getResults().get(i);
SimulateDocumentVerboseResult parsedResult = (SimulateDocumentVerboseResult)parsedResponse.getResults().get(i);
SimulateDocumentVerboseResultTests.assertEqualDocs(responseResult, parsedResult);
} else {
assertThat(response.getResults().get(i), instanceOf(SimulateDocumentBaseResult.class));
assertThat(parsedResponse.getResults().get(i), instanceOf(SimulateDocumentBaseResult.class));
SimulateDocumentBaseResult responseResult = (SimulateDocumentBaseResult)response.getResults().get(i);
SimulateDocumentBaseResult parsedResult = (SimulateDocumentBaseResult)parsedResponse.getResults().get(i);
SimulateDocumentBaseResultTests.assertEqualDocs(responseResult, parsedResult);
}
}
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
// We cannot have random fields in the _source field and _ingest field
return field ->
field.contains(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.SOURCE_FIELD).toString()
) ||
field.contains(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.INGEST_FIELD).toString()
);
}
/**
* Test parsing {@link SimulatePipelineResponse} with inner failures as they don't support asserting on xcontent equivalence, given that
* exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()}
* without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end.
*/
public void testFromXContentWithFailures() throws IOException {
Supplier<SimulatePipelineResponse> instanceSupplier = SimulatePipelineResponseTests::createTestInstanceWithFailures;
// exceptions are not of the same type when parsed back
boolean assertToXContentEquivalence = false;
AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields(), getShuffleFieldsExceptions(),
getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance,
this::assertEqualInstances, assertToXContentEquivalence, getToXContentParams());
}
}

View File

@ -21,35 +21,29 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.StringJoiner;
import java.util.function.Predicate;
import java.util.function.Supplier;
import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
import static org.elasticsearch.action.ingest.WriteableIngestDocumentTests.createRandomIngestDoc;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.nullValue;
public class SimulateProcessorResultTests extends ESTestCase {
public class SimulateProcessorResultTests extends AbstractXContentTestCase<SimulateProcessorResult> {
public void testSerialization() throws IOException {
String processorTag = randomAlphaOfLengthBetween(1, 10);
boolean isSuccessful = randomBoolean();
boolean isIgnoredException = randomBoolean();
SimulateProcessorResult simulateProcessorResult;
if (isSuccessful) {
IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
if (isIgnoredException) {
simulateProcessorResult = new SimulateProcessorResult(processorTag, ingestDocument, new IllegalArgumentException("test"));
} else {
simulateProcessorResult = new SimulateProcessorResult(processorTag, ingestDocument);
}
} else {
simulateProcessorResult = new SimulateProcessorResult(processorTag, new IllegalArgumentException("test"));
}
SimulateProcessorResult simulateProcessorResult = createTestInstance(isSuccessful, isIgnoredException);
BytesStreamOutput out = new BytesStreamOutput();
simulateProcessorResult.writeTo(out);
@ -72,4 +66,96 @@ public class SimulateProcessorResultTests extends ESTestCase {
assertThat(e.getMessage(), equalTo("test"));
}
}
static SimulateProcessorResult createTestInstance(boolean isSuccessful,
boolean isIgnoredException) {
String processorTag = randomAlphaOfLengthBetween(1, 10);
SimulateProcessorResult simulateProcessorResult;
if (isSuccessful) {
IngestDocument ingestDocument = createRandomIngestDoc();
if (isIgnoredException) {
simulateProcessorResult = new SimulateProcessorResult(processorTag, ingestDocument, new IllegalArgumentException("test"));
} else {
simulateProcessorResult = new SimulateProcessorResult(processorTag, ingestDocument);
}
} else {
simulateProcessorResult = new SimulateProcessorResult(processorTag, new IllegalArgumentException("test"));
}
return simulateProcessorResult;
}
private static SimulateProcessorResult createTestInstanceWithFailures() {
boolean isSuccessful = randomBoolean();
boolean isIgnoredException = randomBoolean();
return createTestInstance(isSuccessful, isIgnoredException);
}
@Override
protected SimulateProcessorResult createTestInstance() {
// we test failures separately since comparing XContent is not possible with failures
return createTestInstance(true, false);
}
@Override
protected SimulateProcessorResult doParseInstance(XContentParser parser) {
return SimulateProcessorResult.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
// We cannot have random fields in the _source field and _ingest field
return field ->
field.startsWith(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.SOURCE_FIELD).toString()
) ||
field.startsWith(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.INGEST_FIELD).toString()
);
}
static void assertEqualProcessorResults(SimulateProcessorResult response,
SimulateProcessorResult parsedResponse) {
assertEquals(response.getProcessorTag(), parsedResponse.getProcessorTag());
assertEquals(response.getIngestDocument(), parsedResponse.getIngestDocument());
if (response.getFailure() != null) {
assertNotNull(parsedResponse.getFailure());
assertThat(
parsedResponse.getFailure().getMessage(),
containsString(response.getFailure().getMessage())
);
} else {
assertNull(parsedResponse.getFailure());
}
}
@Override
protected void assertEqualInstances(SimulateProcessorResult response, SimulateProcessorResult parsedResponse) {
assertEqualProcessorResults(response, parsedResponse);
}
/**
* Test parsing {@link SimulateProcessorResult} with inner failures as they don't support asserting on xcontent equivalence, given that
* exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()}
* without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end.
*/
public void testFromXContentWithFailures() throws IOException {
Supplier<SimulateProcessorResult> instanceSupplier = SimulateProcessorResultTests::createTestInstanceWithFailures;
// with random field insertion in the inner exceptions, some random content may be parsed back as metadata,
// but that does not affect our assertions, as we only want to test that parsing does not break.
boolean supportsUnknownFields = true;
// exceptions are not of the same type when parsed back
boolean assertToXContentEquivalence = false;
AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields,
getShuffleFieldsExceptions(), getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance,
this::assertEqualInstances, assertToXContentEquivalence, getToXContentParams());
}
}

View File

@ -25,14 +25,19 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.ingest.RandomDocumentPicks;
import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.elasticsearch.test.RandomObjects;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.StringJoiner;
import java.util.function.Predicate;
import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
import static org.elasticsearch.ingest.IngestDocumentMatcher.assertIngestDocument;
@ -40,7 +45,7 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
public class WriteableIngestDocumentTests extends ESTestCase {
public class WriteableIngestDocumentTests extends AbstractXContentTestCase<WriteableIngestDocument> {
public void testEqualsAndHashcode() throws Exception {
Map<String, Object> sourceAndMetadata = RandomDocumentPicks.randomSource(random());
@ -147,4 +152,42 @@ public class WriteableIngestDocumentTests extends ESTestCase {
IngestDocument serializedIngestDocument = new IngestDocument(toXContentSource, toXContentIngestMetadata);
assertThat(serializedIngestDocument, equalTo(serializedIngestDocument));
}
static IngestDocument createRandomIngestDoc() {
XContentType xContentType = randomFrom(XContentType.values());
BytesReference sourceBytes = RandomObjects.randomSource(random(), xContentType);
Map<String, Object> randomSource = XContentHelper.convertToMap(sourceBytes, false, xContentType).v2();
return RandomDocumentPicks.randomIngestDocument(random(), randomSource);
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
@Override
protected WriteableIngestDocument createTestInstance() {
return new WriteableIngestDocument(createRandomIngestDoc());
}
@Override
protected WriteableIngestDocument doParseInstance(XContentParser parser) {
return WriteableIngestDocument.fromXContent(parser);
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
// We cannot have random fields in the _source field and _ingest field
return field ->
field.startsWith(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.SOURCE_FIELD).toString()
) ||
field.startsWith(
new StringJoiner(".")
.add(WriteableIngestDocument.DOC_FIELD)
.add(WriteableIngestDocument.INGEST_FIELD).toString()
);
}
}

View File

@ -30,7 +30,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.Collections;
@ -68,8 +67,7 @@ public class MainActionTests extends ESTestCase {
TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
x -> null, null, Collections.emptySet());
TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), transportService, mock(ActionFilters.class),
clusterService);
TransportMainAction action = new TransportMainAction(settings, transportService, mock(ActionFilters.class), clusterService);
AtomicReference<MainResponse> responseRef = new AtomicReference<>();
action.doExecute(new MainRequest(), new ActionListener<MainResponse>() {
@Override

View File

@ -80,7 +80,7 @@ public class TransportActionFilterChainTests extends ESTestCase {
String actionName = randomAlphaOfLength(randomInt(30));
ActionFilters actionFilters = new ActionFilters(filters);
TransportAction<TestRequest, TestResponse> transportAction =
new TransportAction<TestRequest, TestResponse>(Settings.EMPTY, actionName, null, actionFilters,
new TransportAction<TestRequest, TestResponse>(Settings.EMPTY, actionName, actionFilters,
new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) {
@Override
protected void doExecute(TestRequest request, ActionListener<TestResponse> listener) {
@ -158,7 +158,7 @@ public class TransportActionFilterChainTests extends ESTestCase {
String actionName = randomAlphaOfLength(randomInt(30));
ActionFilters actionFilters = new ActionFilters(filters);
TransportAction<TestRequest, TestResponse> transportAction = new TransportAction<TestRequest, TestResponse>(Settings.EMPTY,
actionName, null, actionFilters, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) {
actionName, actionFilters, new TaskManager(Settings.EMPTY, threadPool, Collections.emptySet())) {
@Override
protected void doExecute(TestRequest request, ActionListener<TestResponse> listener) {
listener.onResponse(new TestResponse());

View File

@ -118,7 +118,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
private final Map<ShardRouting, Object> shards = new HashMap<>();
TestTransportBroadcastByNodeAction(Settings settings, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request, String executor) {
super(settings, "indices:admin/test", THREAD_POOL, TransportBroadcastByNodeActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request, executor);
super(settings, "indices:admin/test", TransportBroadcastByNodeActionTests.this.clusterService, transportService, actionFilters, indexNameExpressionResolver, request, executor);
}
@Override

View File

@ -100,7 +100,7 @@ public class BroadcastReplicationTests extends ESTestCase {
TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> clusterService.localNode(), null, Collections.emptySet());
transportService.start();
transportService.acceptIncomingRequests();
broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, threadPool, clusterService, transportService,
broadcastReplicationAction = new TestBroadcastReplicationAction(Settings.EMPTY, clusterService, transportService,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), null);
}
@ -206,10 +206,10 @@ public class BroadcastReplicationTests extends ESTestCase {
private class TestBroadcastReplicationAction extends TransportBroadcastReplicationAction<DummyBroadcastRequest, BroadcastResponse, BasicReplicationRequest, ReplicationResponse> {
protected final Set<Tuple<ShardId, ActionListener<ReplicationResponse>>> capturedShardRequests = ConcurrentCollections.newConcurrentSet();
TestBroadcastReplicationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
TransportReplicationAction replicatedBroadcastShardAction) {
super("test-broadcast-replication-action", DummyBroadcastRequest::new, settings, threadPool, clusterService, transportService,
TestBroadcastReplicationAction(Settings settings, ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
TransportReplicationAction replicatedBroadcastShardAction) {
super("test-broadcast-replication-action", DummyBroadcastRequest::new, settings, clusterService, transportService,
actionFilters, indexNameExpressionResolver, replicatedBroadcastShardAction);
}

View File

@ -59,7 +59,7 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTestCase {
private static class InternalTransportAction extends TransportAction {
private InternalTransportAction(Settings settings, String actionName, ThreadPool threadPool) {
super(settings, actionName, threadPool, EMPTY_FILTERS, new TaskManager(settings, threadPool, Collections.emptySet()));
super(settings, actionName, EMPTY_FILTERS, new TaskManager(settings, threadPool, Collections.emptySet()));
}
@Override

View File

@ -20,7 +20,6 @@
package org.elasticsearch.index.mapper;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FloatPoint;
import org.apache.lucene.document.HalfFloatPoint;
@ -37,10 +36,11 @@ import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.core.internal.io.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.mapper.MappedFieldType.Relation;
import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType;
import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
import org.hamcrest.Matchers;
import org.junit.Before;
@ -68,6 +68,17 @@ public class NumberFieldTypeTests extends FieldTypeTestCase {
return new NumberFieldMapper.NumberFieldType(type);
}
public void testEqualsWithDifferentNumberTypes() {
NumberType type = randomFrom(NumberType.values());
NumberFieldType fieldType = new NumberFieldType(type);
NumberType otherType = randomValueOtherThan(type,
() -> randomFrom(NumberType.values()));
NumberFieldType otherFieldType = new NumberFieldType(otherType);
assertNotEquals(fieldType, otherFieldType);
}
public void testIsFieldWithinQuery() throws IOException {
MappedFieldType ft = createDefaultFieldType();
// current impl ignores args and should always return INTERSECTS

View File

@ -423,7 +423,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
}
@Override
void prepareTargetForTranslog(final boolean createNewTranslog, final int totalTranslogOps) throws IOException {
void prepareTargetForTranslog(final boolean fileBasedRecovery, final int totalTranslogOps) throws IOException {
prepareTargetForTranslogCalled.set(true);
}

View File

@ -34,7 +34,6 @@ import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
@ -511,10 +510,9 @@ public class TestPersistentTasksPlugin extends Plugin implements ActionPlugin, P
TestTasksRequest, TestTasksResponse, TestTaskResponse> {
@Inject
public TransportTestTaskAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, String nodeExecutor) {
super(settings, TestTaskAction.NAME, threadPool, clusterService, transportService, actionFilters,
public TransportTestTaskAction(Settings settings, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters) {
super(settings, TestTaskAction.NAME, clusterService, transportService, actionFilters,
TestTasksRequest::new, TestTasksResponse::new, ThreadPool.Names.MANAGEMENT);
}

View File

@ -11,7 +11,6 @@ apply plugin: 'elasticsearch.docs-test'
buildRestTests.expectedUnconvertedCandidates = [
'en/rest-api/watcher/put-watch.asciidoc',
'en/security/authentication/user-cache.asciidoc',
'en/security/authorization/field-and-document-access-control.asciidoc',
'en/security/authorization/run-as-privilege.asciidoc',
'en/security/ccs-clients-integrations/http.asciidoc',
'en/security/authorization/custom-roles-provider.asciidoc',
@ -48,7 +47,6 @@ buildRestTests.expectedUnconvertedCandidates = [
'en/watcher/trigger/schedule/yearly.asciidoc',
'en/watcher/troubleshooting.asciidoc',
'en/rest-api/ml/delete-snapshot.asciidoc',
'en/rest-api/ml/forecast.asciidoc',
'en/rest-api/ml/get-bucket.asciidoc',
'en/rest-api/ml/get-job-stats.asciidoc',
'en/rest-api/ml/get-overall-buckets.asciidoc',
@ -57,7 +55,6 @@ buildRestTests.expectedUnconvertedCandidates = [
'en/rest-api/ml/get-influencer.asciidoc',
'en/rest-api/ml/get-snapshot.asciidoc',
'en/rest-api/ml/post-data.asciidoc',
'en/rest-api/ml/preview-datafeed.asciidoc',
'en/rest-api/ml/revert-snapshot.asciidoc',
'en/rest-api/ml/update-snapshot.asciidoc',
'en/rest-api/watcher/stats.asciidoc',
@ -297,7 +294,9 @@ setups['farequote_index'] = '''
responsetime:
type: float
airline:
type: keyword
type: keyword
doc_count:
type: integer
'''
setups['farequote_data'] = setups['farequote_index'] + '''
- do:
@ -307,11 +306,11 @@ setups['farequote_data'] = setups['farequote_index'] + '''
refresh: true
body: |
{"index": {"_id":"1"}}
{"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000"}
{"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000", "doc_count": 5}
{"index": {"_id":"2"}}
{"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000"}
{"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000", "doc_count": 23}
{"index": {"_id":"3"}}
{"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000"}
{"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000", "doc_count": 42}
'''
setups['farequote_job'] = setups['farequote_data'] + '''
- do:
@ -333,6 +332,16 @@ setups['farequote_job'] = setups['farequote_data'] + '''
}
}
'''
setups['farequote_datafeed'] = setups['farequote_job'] + '''
- do:
xpack.ml.put_datafeed:
datafeed_id: "datafeed-farequote"
body: >
{
"job_id":"farequote",
"indexes":"farequote"
}
'''
setups['server_metrics_index'] = '''
- do:
indices.create:

View File

@ -5,7 +5,7 @@
<titleabbrev>Forecast Jobs</titleabbrev>
++++
Predict the future behavior of a time series by using historical behavior.
Predicts the future behavior of a time series by using its historical behavior.
==== Request
@ -62,7 +62,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_forecast
}
--------------------------------------------------
// CONSOLE
// TEST[skip:todo]
// TEST[skip:requires delay]
When the forecast is created, you receive the following results:
[source,js]
@ -72,7 +72,7 @@ When the forecast is created, you receive the following results:
"forecast_id": "wkCWa2IB2lF8nSE_TzZo"
}
----
// NOTCONSOLE
You can subsequently see the forecast in the *Single Metric Viewer* in {kib}.
//and in the results that you retrieve by using {ml} APIs such as the
//<<ml-get-bucket,get bucket API>> and <<ml-get-record,get records API>>.

View File

@ -31,7 +31,6 @@ structure of the data that will be passed to the anomaly detection engine.
You must have `monitor_ml`, `monitor`, `manage_ml`, or `manage` cluster
privileges to use this API. For more information, see
{xpack-ref}/security-privileges.html[Security Privileges].
//<<privileges-list-cluster>>.
==== Security Integration
@ -54,27 +53,30 @@ The following example obtains a preview of the `datafeed-farequote` {dfeed}:
GET _xpack/ml/datafeeds/datafeed-farequote/_preview
--------------------------------------------------
// CONSOLE
// TEST[skip:todo]
// TEST[setup:farequote_datafeed]
The data that is returned for this example is as follows:
[source,js]
----
[
{
"@timestamp": 1454803200000,
"airline": "AAL",
"responsetime": 132.20460510253906
},
{
"@timestamp": 1454803200000,
"time": 1454803200000,
"airline": "JZA",
"doc_count": 5,
"responsetime": 990.4628295898438
},
{
"@timestamp": 1454803200000,
"time": 1454803200000,
"airline": "JBU",
"doc_count": 23,
"responsetime": 877.5927124023438
},
...
{
"time": 1454803200000,
"airline": "KLM",
"doc_count": 42,
"responsetime": 1355.481201171875
}
]
----
// TESTRESPONSE

View File

@ -3,9 +3,11 @@
=== Setting up field and document level security
You can control access to data within an index by adding field and document level
security permissions to a role. Field level security permissions restrict access
to particular fields within a document. Document level security permissions
restrict access to particular documents within an index.
security permissions to a role.
<<field-level-security,Field level security permissions>> restrict access to
particular fields within a document.
<<document-level-security,Document level security permissions>> restrict access
to particular documents within an index.
NOTE: Document and field level security is currently meant to operate with
read-only privileged accounts. Users with document and field level
@ -23,399 +25,6 @@ grant wider access than intended. Each user has a single set of field level and
document level permissions per index. See <<multiple-roles-dls-fls>>.
=====================================================================
[[field-level-security]]
==== Field level security
To enable field level security, specify the fields that each role can access
as part of the indices permissions in a role definition. Field level security is
thus bound to a well-defined set of indices (and potentially a set of
<<document-level-security, documents>>).
The following role definition grants read access only to the `category`,
`@timestamp`, and `message` fields in all the `events-*` indices.
[source,js]
--------------------------------------------------
{
"indices": [
{
"names": [ "events-*" ],
"privileges": [ "read" ],
"field_security" : {
"grant" : [ "category", "@timestamp", "message" ]
}
}
]
}
--------------------------------------------------
Access to the following meta fields is always allowed: `_id`,
`_type`, `_parent`, `_routing`, `_timestamp`, `_ttl`, `_size` and `_index`. If
you specify an empty list of fields, only these meta fields are accessible.
NOTE: Omitting the fields entry entirely disables field-level security.
You can also specify field expressions. For example, the following
example grants read access to all fields that start with an `event_` prefix:
[source,js]
--------------------------------------------------
{
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"field_security" : {
"grant" : [ "event_*" ]
}
}
]
}
--------------------------------------------------
Use the dot notations to refer to nested fields in more complex documents. For
example, assuming the following document:
[source,js]
--------------------------------------------------
{
"customer": {
"handle": "Jim",
"email": "jim@mycompany.com",
"phone": "555-555-5555"
}
}
--------------------------------------------------
The following role definition enables only read access to the customer `handle`
field:
[source,js]
--------------------------------------------------
{
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"field_security" : {
"grant" : [ "customer.handle" ]
}
}
]
}
--------------------------------------------------
This is where wildcard support shines. For example, use `customer.*` to enable
only read access to the `customer` data:
[source,js]
--------------------------------------------------
{
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"field_security" : {
"grant" : [ "customer.*" ]
}
}
]
}
--------------------------------------------------
You can deny permission to access fields with the following syntax:
[source,js]
--------------------------------------------------
{
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"field_security" : {
"grant" : [ "*"],
"except": [ "customer.handle" ]
}
}
]
}
--------------------------------------------------
The following rules apply:
* The absence of `field_security` in a role is equivalent to * access.
* If permission has been granted explicitly to some fields, you can specify
denied fields. The denied fields must be a subset of the fields to which
permissions were granted.
* Defining denied and granted fields implies access to all granted fields except
those which match the pattern in the denied fields.
For example:
[source,js]
--------------------------------------------------
{
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"field_security" : {
"except": [ "customer.handle" ],
"grant" : [ "customer.*" ]
}
}
]
}
--------------------------------------------------
In the above example, users can read all fields with the prefix "customer."
except for "customer.handle".
An empty array for `grant` (for example, `"grant" : []`) means that access has
not been granted to any fields.
===== Field Level Security and Roles
When a user has several roles that specify field level permissions, the
resulting field level permissions per index are the union of the individual role
permissions. For example, if these two roles are merged:
[source,js]
--------------------------------------------------
{
// role 1
...
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"field_security" : {
"grant": [ "a.*" ],
"except" : [ "a.b*" ]
}
}
]
}
{
// role 2
...
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"field_security" : {
"grant": [ "a.b*" ],
"except" : [ "a.b.c*" ]
}
}
]
}
--------------------------------------------------
The resulting permission is equal to:
[source,js]
--------------------------------------------------
{
// role 1 + role 2
...
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"field_security" : {
"grant": [ "a.*" ],
"except" : [ "a.b.c*" ]
}
}
]
}
--------------------------------------------------
[[document-level-security]]
==== Document level security
Document level security restricts the documents that users have read access to.
To enable document level security, specify a query that matches all the
accessible documents as part of the indices permissions within a role definition.
Document level security is thus bound to a well defined set of indices.
Enabling document level security restricts which documents can be accessed from
any document-based read API. To enable document level security, you use a query
to specify the documents that each role can access in the `roles.yml` file.
You specify the document query with the `query` option. The document query is
associated with a particular index or index pattern and operates in conjunction
with the privileges specified for the indices.
The following role definition grants read access only to documents that
belong to the `click` category within all the `events-*` indices:
[source,js]
--------------------------------------------------
{
"indices": [
{
"names": [ "events-*" ],
"privileges": [ "read" ],
"query": "{\"match\": {\"category\": \"click\"}}"
}
]
}
--------------------------------------------------
NOTE: Omitting the `query` entry entirely disables document level security for
the respective indices permission entry.
The specified `query` expects the same format as if it was defined in the
search request and supports the full {es} {ref}/query-dsl.html[Query DSL].
For example, the following role grants read access only to the documents whose
`department_id` equals `12`:
[source,js]
--------------------------------------------------
{
"indices" : [
{
"names" : [ "*" ],
"privileges" : [ "read" ],
"query" : {
"term" : { "department_id" : 12 }
}
}
]
}
--------------------------------------------------
NOTE: `query` also accepts queries written as string values.
[[templating-role-query]]
===== Templating a role query
You can use Mustache templates in a role query to insert the username of the
current authenticated user into the role. Like other places in {es} that support
templating or scripting, you can specify inline, stored, or file-based templates
and define custom parameters. You access the details for the current
authenticated user through the `_user` parameter.
For example, the following role query uses a template to insert the username
of the current authenticated user:
[source,js]
--------------------------------------------------
{
"indices" : [
{
"names" : [ "my_index" ],
"privileges" : [ "read" ],
"query" : {
"template" : {
"source" : {
"term" : { "acl.username" : "{{_user.username}}" }
}
}
}
}
]
}
--------------------------------------------------
You can access the following information through the `_user` variable:
[options="header"]
|======
| Property | Description
| `_user.username` | The username of the current authenticated user.
| `_user.full_name` | If specified, the full name of the current authenticated user.
| `_user.email` | If specified, the email of the current authenticated user.
| `_user.roles` | If associated, a list of the role names of the current authenticated user.
| `_user.metadata` | If specified, a hash holding custom metadata of the current authenticated user.
|======
You can also access custom user metadata. For example, if you maintain a
`group_id` in your user metadata, you can apply document level security
based on the `group.id` field in your documents:
[source,js]
--------------------------------------------------
{
"indices" : [
{
"names" : [ "my_index" ],
"privileges" : [ "read" ],
"query" : {
"template" : {
"source" : {
"term" : { "group.id" : "{{_user.metadata.group_id}}" }
}
}
}
}
]
}
--------------------------------------------------
[[set-security-user-processor]]
===== Set security user ingest processor
If an index is shared by many small users it makes sense to put all these users
into the same index. Having a dedicated index or shard per user is wasteful.
To guarantee that a user reads only their own documents, it makes sense to set up
document level security. In this scenario, each document must have the username
or role name associated with it, so that this information can be used by the
role query for document level security. This is a situation where the
`set_security_user` ingest processor can help.
NOTE: Document level security doesn't apply to write APIs. You must use unique
ids for each user that uses the same index, otherwise they might overwrite other
users' documents. The ingest processor just adds properties for the current
authenticated user to the documents that are being indexed.
The `set_security_user` processor attaches user-related details (such as
`username`, `roles`, `email`, `full_name` and `metadata` ) from the current
authenticated user to the current document by pre-processing the ingest. When
you index data with an ingest pipeline, user details are automatically attached
to the document. For example:
[source,js]
--------------------------------------------------
PUT shared-logs/log/1?pipeline=my_pipeline_id
{
...
}
--------------------------------------------------
Read the {ref}/ingest.html[ingest docs] for more information
about setting up a pipeline and other processors.
[[set-security-user-options]]
.Set Security User Options
[options="header"]
|======
| Name | Required | Default | Description
| `field` | yes | - | The field to store the user information into.
| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`.
|======
The following example adds all user details for the current authenticated user
to the `user` field for all documents that are processed by this pipeline:
[source,js]
--------------------------------------------------
{
"processors" : [
{
"set_security_user": {
"field": "user"
}
}
]
}
--------------------------------------------------
[[multiple-roles-dls-fls]]
==== Multiple roles with document and field level security
@ -447,3 +56,6 @@ fields.
If you need to restrict access to both documents and fields, consider splitting
documents by index instead.
include::role-templates.asciidoc[]
include::set-security-user.asciidoc[]

View File

@ -0,0 +1,71 @@
[[templating-role-query]]
==== Templating a role query
When you create a role, you can specify a query that defines the
<<document-level-security,document level security permissions>>. You can
optionally use Mustache templates in the role query to insert the username of the
current authenticated user into the role. Like other places in {es} that support
templating or scripting, you can specify inline, stored, or file-based templates
and define custom parameters. You access the details for the current
authenticated user through the `_user` parameter.
For example, the following role query uses a template to insert the username
of the current authenticated user:
[source,js]
--------------------------------------------------
POST /_xpack/security/role/example1
{
"indices" : [
{
"names" : [ "my_index" ],
"privileges" : [ "read" ],
"query" : {
"template" : {
"source" : {
"term" : { "acl.username" : "{{_user.username}}" }
}
}
}
}
]
}
--------------------------------------------------
// CONSOLE
You can access the following information through the `_user` variable:
[options="header"]
|======
| Property | Description
| `_user.username` | The username of the current authenticated user.
| `_user.full_name` | If specified, the full name of the current authenticated user.
| `_user.email` | If specified, the email of the current authenticated user.
| `_user.roles` | If associated, a list of the role names of the current authenticated user.
| `_user.metadata` | If specified, a hash holding custom metadata of the current authenticated user.
|======
You can also access custom user metadata. For example, if you maintain a
`group_id` in your user metadata, you can apply document level security
based on the `group.id` field in your documents:
[source,js]
--------------------------------------------------
POST /_xpack/security/role/example2
{
"indices" : [
{
"names" : [ "my_index" ],
"privileges" : [ "read" ],
"query" : {
"template" : {
"source" : {
"term" : { "group.id" : "{{_user.metadata.group_id}}" }
}
}
}
}
]
}
--------------------------------------------------
// CONSOLE

View File

@ -0,0 +1,61 @@
[[set-security-user-processor]]
==== Pre-processing documents to add security details
// If an index is shared by many small users it makes sense to put all these users
// into the same index. Having a dedicated index or shard per user is wasteful.
// TBD: It's unclear why we're putting users in an index here.
To guarantee that a user reads only their own documents, it makes sense to set up
document level security. In this scenario, each document must have the username
or role name associated with it, so that this information can be used by the
role query for document level security. This is a situation where the
`set_security_user` ingest processor can help.
NOTE: Document level security doesn't apply to write APIs. You must use unique
ids for each user that uses the same index, otherwise they might overwrite other
users' documents. The ingest processor just adds properties for the current
authenticated user to the documents that are being indexed.
The `set_security_user` processor attaches user-related details (such as
`username`, `roles`, `email`, `full_name` and `metadata`) from the current
authenticated user to the current document by pre-processing the ingest. When
you index data with an ingest pipeline, user details are automatically attached
to the document. For example:
[source,js]
--------------------------------------------------
PUT shared-logs/log/1?pipeline=my_pipeline_id
{
...
}
--------------------------------------------------
// NOTCONSOLE
For more information about setting up a pipeline and other processors, see
{ref}/ingest.html[ingest node].
[[set-security-user-options]]
.Set Security User Options
[options="header"]
|======
| Name | Required | Default | Description
| `field` | yes | - | The field to store the user information into.
| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user related properties are added to the `field`.
|======
The following example adds all user details for the current authenticated user
to the `user` field for all documents that are processed by this pipeline:
[source,js]
--------------------------------------------------
{
"processors" : [
{
"set_security_user": {
"field": "user"
}
}
]
}
--------------------------------------------------
// NOTCONSOLE
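For context, here is a minimal sketch of how such a pipeline could be registered through the ingest API; the pipeline id matches the indexing example above, and the `description` text is purely illustrative:
[source,js]
--------------------------------------------------
PUT _ingest/pipeline/my_pipeline_id
{
  "description" : "Attach the current authenticated user's details to each document",
  "processors" : [
    {
      "set_security_user": {
        "field": "user"
      }
    }
  ]
}
--------------------------------------------------
// NOTCONSOLE
Any document indexed with `?pipeline=my_pipeline_id`, as in the earlier example, then carries the authenticated user's details in its `user` field, which the role query can match against.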

View File

@ -0,0 +1 @@
include::syntax-reserved.asciidoc[]

View File

@ -1,5 +1,6 @@
[[sql-spec-reserved]]
=== Reserved Keywords
[appendix]
[[sql-syntax-reserved]]
= Reserved Keywords
The following table lists reserved keywords that need to be quoted. An example is also provided to make usage more obvious.

View File

@ -0,0 +1,63 @@
[[sql-concepts]]
== Conventions and Terminology
For clarity, it is important to establish the meaning behind certain words, as the same wording might convey different meanings to different readers depending on one's familiarity with SQL versus {es}.
NOTE: This documentation, while trying to be complete, does assume the reader has a _basic_ understanding of {es} and/or SQL. If that is not the case, please continue reading the documentation, taking notes about the topics that are unclear and pursuing them either through the main {es} documentation or through the plethora of SQL material available in the open (there are simply too many excellent resources to enumerate here).
As a general rule, {es-sql}, as the name indicates, provides a SQL interface to {es}. As such, it follows SQL terminology and conventions first, whenever possible. However, the backing engine itself is {es}, for which {es-sql} was purposely created; hence features or concepts that are not available in SQL, or cannot be mapped to it correctly, appear
in {es-sql}.
Last but not least, {es-sql} tries to obey the https://en.wikipedia.org/wiki/Principle_of_least_astonishment[principle of least surprise], though, as with all things, this is relative.
=== Mapping concepts across SQL and {es}
While SQL and {es} have different terms for the way the data is organized (and different semantics), essentially their purpose is the same.
So let's start from the bottom; these roughly are:
[cols="1,1,5", options="header"]
|===
|SQL
|{es}
|Description
|`column`
|`field`
|In both cases, at the lowest level, data is stored in _named_ entries, of a variety of <<sql-data-types, data types>>, containing _one_ value. SQL calls such an entry a _column_ while {es} calls it a _field_.
Notice that in {es} a field can contain _multiple_ values of the same type (essentially a list) while in SQL, a _column_ can contain _exactly_ one value of said type.
{es-sql} will do its best to preserve the SQL semantics and, depending on the query, reject those that return fields with more than one value.
|`row`
|`document`
|++Column++s and ++field++s do _not_ exist by themselves; they are part of a `row` or a `document`. The two have slightly different semantics: a `row` tends to be _strict_ (and enforce more constraints) while a `document` tends to be a bit more flexible or loose (while still having a structure).
|`table`
|`index`
|The target against which queries, whether in SQL or {es}, get executed.
|`schema`
|_implicit_
|In RDBMS, `schema` is mainly a namespace of tables and typically used as a security boundary. {es} does not provide an equivalent concept for it. However, when security is enabled, {es} automatically applies the security enforcement so that a role sees only the data it is allowed to see (in SQL jargon, its _schema_).
|`catalog` or `database`
|`cluster` instance
|In SQL, `catalog` or `database` are used interchangeably and represent a set of schemas; that is, a number of tables.
In {es} the set of indices available are grouped in a `cluster`. The semantics also differ a bit: a `database` is essentially yet another namespace (which can have some implications on the way data is stored) while an {es} `cluster` is a runtime instance, or rather a set of at least one {es} instance (typically running distributed).
In practice this means that while in SQL one can potentially have multiple catalogs inside an instance, in {es} one is restricted to only _one_.
|`cluster`
|`cluster` (federated)
|Traditionally in SQL, _cluster_ refers to a single RDBMS instance which contains a number of ++catalog++s or ++database++s (see above). The same word can be reused inside {es} as well, though its semantics are clarified a bit.
While an RDBMS tends to have only one running instance, on a single machine (_not_ distributed), {es} goes the opposite way and by default is distributed and multi-instance.
Furthermore, an {es} `cluster` can be connected to other ++cluster++s in a _federated_ fashion, thus `cluster` means:
single cluster::
Multiple {es} instances typically distributed across machines, running within the same namespace.
multiple clusters::
Multiple clusters, each with its own namespace, connected to each other in a federated setup (see <<modules-cross-cluster-search, Cross cluster Search>>).
|===
As one can see, while the mapping between the concepts is not exactly one to one and the semantics are somewhat different, there are more things in common than differences. In fact, thanks to SQL's declarative nature, many concepts move across to {es} transparently and the terminology of the two is likely to be used interchangeably throughout the rest of the material.
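To make the mapping more tangible, consider a sketch (the `library` index and its fields are illustrative, borrowed from the CLI example later in this documentation): the following {es} document corresponds to a SQL row with two columns, `name` and `page_count`, in a table named `library`.
[source,js]
--------------------------------------------------
{
  "name": "Leviathan Wakes",
  "page_count": 561
}
--------------------------------------------------
// NOTCONSOLE
Each such document is a row; the documents in the `library` index together form the table.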

View File

@ -1,4 +1,3 @@
[role="xpack"]
[[sql-cli]]
== SQL CLI
@ -37,18 +36,3 @@ James S.A. Corey |Leviathan Wakes |561 |1306972800000
--------------------------------------------------
// TODO it'd be lovely to be able to assert that this is correct but
// that is probably more work then it is worth right now.
[[sql-cli-permissions]]
[NOTE]
===============================
If you are using Security you need to add a few permissions to
users so they can run SQL. To run SQL using the CLI a user needs
`read`, `indices:admin/get`, and `cluster:monitor/main`. The
following example configures a role that can run SQL in the CLI
for the `test` and `bort` indices:
["source","yaml",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-tests}/security/roles.yml[cli_jdbc]
--------------------------------------------------
===============================

View File

@ -1,4 +1,3 @@
[role="xpack"]
[[sql-jdbc]]
== SQL JDBC
@ -36,11 +35,11 @@ from `artifacts.elastic.co/maven` by adding it to the repositories list:
[float]
=== Setup
The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`. Note the driver
also implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registerd automatically
The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`.
Note the driver implements the JDBC 4.0 +Service Provider+ mechanism, meaning it is registered automatically
as long as it is available in the classpath.
Once registered, the driver expects the following syntax as an URL:
Once registered, the driver understands the following syntax as a URL:
["source","text",subs="attributes"]
----
@ -120,12 +119,12 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que
To put all of it together, the following URL:
["source","text",subs="attributes"]
["source","text"]
----
jdbc:es://http://server:3456/timezone=UTC&page.size=250
----
Opens up a {es-jdbc} connection to `server` on port `3456`, setting the JDBC timezone to `UTC` and its pagesize to `250` entries.
Opens up an {es-sql} connection to `server` on port `3456`, setting the JDBC connection timezone to `UTC` and its page size to `250` entries.
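As a quick, self-contained sketch of the above (using only standard JDBC; the `library` table is illustrative and the URL is the example one from this section), a connection could be obtained and queried like this:
["source","java"]
--------------------------------------------------
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class EsSqlJdbcSketch {
    public static void main(String[] args) throws Exception {
        // No Class.forName needed: the driver registers itself through the
        // JDBC 4.0 Service Provider mechanism when present on the classpath.
        String url = "jdbc:es://http://server:3456/timezone=UTC&page.size=250";
        try (Connection con = DriverManager.getConnection(url);
             Statement stmt = con.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT name FROM library")) {
            while (rs.next()) {
                System.out.println(rs.getString("name")); // one column per row
            }
        }
    }
}
--------------------------------------------------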
=== API usage
@ -175,20 +174,4 @@ connection. For example:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{jdbc-tests}/SimpleExampleTestCase.java[simple_example]
--------------------------------------------------
[[sql-jdbc-permissions]]
[NOTE]
===============================
If you are using Security you need to add a few permissions to
users so they can run SQL. To run SQL a user needs `read` and
`indices:admin/get`. Some parts of the API require
`cluster:monitor/main`. The following example configures a
role that can run SQL in JDBC querying the `test` and `bort`
indices:
["source","yaml",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-tests}/security/roles.yml[cli_jdbc]
--------------------------------------------------
===============================
--------------------------------------------------

View File

@ -186,17 +186,3 @@ or fewer results though. `time_zone` is the time zone to use for date
functions and date parsing. `time_zone` defaults to `utc` and can take
any values documented
http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[here].
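By way of illustration, a sketch of a request that sets both parameters (the `library` index is hypothetical, as are the chosen values):
[source,js]
--------------------------------------------------
POST /_xpack/sql?format=txt
{
    "query": "SELECT * FROM library ORDER BY page_count DESC",
    "fetch_size": 5,
    "time_zone": "Europe/Vienna"
}
--------------------------------------------------
// NOTCONSOLE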
[[sql-rest-permissions]]
[NOTE]
===============================
If you are using Security you need to add a few permissions to
users so they can run SQL. To run SQL a user needs `read` and
`indices:admin/get`. The following example configures a role
that can run SQL against the `test` and `bort` indices:
["source","yaml",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{sql-tests}/security/roles.yml[rest]
--------------------------------------------------
===============================

Some files were not shown because too many files have changed in this diff.