Merge branch 'master' into index-lifecycle
This commit is contained in:
commit
830198fa00
|
@ -209,6 +209,14 @@ Before submitting your changes, run the test suite to make sure that nothing is
|
|||
./gradlew check
|
||||
```
|
||||
|
||||
If your changes affect only the documentation, run:
|
||||
|
||||
```sh
|
||||
./gradlew -p docs check
|
||||
```
|
||||
For more information about testing code examples in the documentation, see
|
||||
https://github.com/elastic/elasticsearch/blob/master/docs/README.asciidoc
|
||||
|
||||
### Project layout
|
||||
|
||||
This repository is split into many top level directories. The most important
|
||||
|
|
|
@ -79,7 +79,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC
|
|||
|
||||
@Override
|
||||
public boolean bulkIndex(List<String> bulkData) {
|
||||
NoopBulkRequestBuilder builder = NoopBulkAction.INSTANCE.newRequestBuilder(client);
|
||||
NoopBulkRequestBuilder builder = new NoopBulkRequestBuilder(client,NoopBulkAction.INSTANCE);
|
||||
for (String bulkItem : bulkData) {
|
||||
builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8), XContentType.JSON));
|
||||
}
|
||||
|
@ -108,7 +108,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportC
|
|||
@Override
|
||||
public boolean search(String source) {
|
||||
final SearchResponse response;
|
||||
NoopSearchRequestBuilder builder = NoopSearchAction.INSTANCE.newRequestBuilder(client);
|
||||
NoopSearchRequestBuilder builder = new NoopSearchRequestBuilder(client, NoopSearchAction.INSTANCE);
|
||||
try {
|
||||
builder.setIndices(indexName);
|
||||
builder.setQuery(QueryBuilders.wrapperQuery(source));
|
||||
|
|
|
@ -21,9 +21,8 @@ package org.elasticsearch.plugin.noop.action.bulk;
|
|||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.bulk.BulkResponse;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class NoopBulkAction extends Action<BulkRequest, BulkResponse, NoopBulkRequestBuilder> {
|
||||
public class NoopBulkAction extends Action<BulkRequest, BulkResponse> {
|
||||
public static final String NAME = "mock:data/write/bulk";
|
||||
|
||||
public static final NoopBulkAction INSTANCE = new NoopBulkAction();
|
||||
|
@ -32,11 +31,6 @@ public class NoopBulkAction extends Action<BulkRequest, BulkResponse, NoopBulkRe
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public NoopBulkRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new NoopBulkRequestBuilder(client, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BulkResponse newResponse() {
|
||||
return new BulkResponse(null, 0);
|
||||
|
|
|
@ -35,7 +35,7 @@ import org.elasticsearch.common.Nullable;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
||||
public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, NoopBulkRequestBuilder>
|
||||
public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse>
|
||||
implements WriteRequestBuilder<NoopBulkRequestBuilder> {
|
||||
|
||||
public NoopBulkRequestBuilder(ElasticsearchClient client, NoopBulkAction action) {
|
||||
|
|
|
@ -21,9 +21,8 @@ package org.elasticsearch.plugin.noop.action.search;
|
|||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class NoopSearchAction extends Action<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {
|
||||
public class NoopSearchAction extends Action<SearchRequest, SearchResponse> {
|
||||
public static final NoopSearchAction INSTANCE = new NoopSearchAction();
|
||||
public static final String NAME = "mock:data/read/search";
|
||||
|
||||
|
@ -31,11 +30,6 @@ public class NoopSearchAction extends Action<SearchRequest, SearchResponse, Noop
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public NoopSearchRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new NoopSearchRequestBuilder(client, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SearchResponse newResponse() {
|
||||
return new SearchResponse();
|
||||
|
|
|
@ -42,7 +42,7 @@ import org.elasticsearch.search.suggest.SuggestBuilder;
|
|||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {
|
||||
public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse> {
|
||||
|
||||
public NoopSearchRequestBuilder(ElasticsearchClient client, NoopSearchAction action) {
|
||||
super(client, action, new SearchRequest());
|
||||
|
|
|
@ -23,6 +23,8 @@ import org.apache.http.Header;
|
|||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
|
||||
import org.elasticsearch.action.ingest.GetPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.GetPipelineResponse;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.PutPipelineResponse;
|
||||
|
||||
|
@ -87,4 +89,26 @@ public final class ClusterClient {
|
|||
restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline,
|
||||
PutPipelineResponse::fromXContent, listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get an existing pipeline
|
||||
* <p>
|
||||
* See
|
||||
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/get-pipeline-api.html"> Get Pipeline API on elastic.co</a>
|
||||
*/
|
||||
public GetPipelineResponse getPipeline(GetPipelineRequest request, Header... headers) throws IOException {
|
||||
return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::getPipeline,
|
||||
GetPipelineResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously get an existing pipeline
|
||||
* <p>
|
||||
* See
|
||||
* <a href="https://www.elastic.co/guide/en/elasticsearch/reference/master/get-pipeline-api.html"> Get Pipeline API on elastic.co</a>
|
||||
*/
|
||||
public void getPipelineAsync(GetPipelineRequest request, ActionListener<GetPipelineResponse> listener, Header... headers) {
|
||||
restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::getPipeline,
|
||||
GetPipelineResponse::fromXContent, listener, emptySet(), headers);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -61,6 +61,7 @@ import org.elasticsearch.action.get.GetRequest;
|
|||
import org.elasticsearch.action.get.MultiGetRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.GetPipelineRequest;
|
||||
import org.elasticsearch.action.search.ClearScrollRequest;
|
||||
import org.elasticsearch.action.search.MultiSearchRequest;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
|
@ -620,6 +621,18 @@ final class RequestConverters {
|
|||
return request;
|
||||
}
|
||||
|
||||
static Request getPipeline(GetPipelineRequest getPipelineRequest) {
|
||||
String endpoint = new EndpointBuilder()
|
||||
.addPathPartAsIs("_ingest/pipeline")
|
||||
.addCommaSeparatedPathParts(getPipelineRequest.getIds())
|
||||
.build();
|
||||
Request request = new Request(HttpGet.METHOD_NAME, endpoint);
|
||||
|
||||
Params parameters = new Params(request);
|
||||
parameters.withMasterTimeout(getPipelineRequest.masterNodeTimeout());
|
||||
return request;
|
||||
}
|
||||
|
||||
static Request putPipeline(PutPipelineRequest putPipelineRequest) throws IOException {
|
||||
String endpoint = new EndpointBuilder()
|
||||
.addPathPartAsIs("_ingest/pipeline")
|
||||
|
|
|
@ -279,6 +279,17 @@ public class RestHighLevelClient implements Closeable {
|
|||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
|
||||
*/
|
||||
public final BulkResponse bulk(BulkRequest bulkRequest, RequestOptions options) throws IOException {
|
||||
return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a bulk request using the Bulk API
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #bulk(BulkRequest, RequestOptions)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, emptySet(), headers);
|
||||
}
|
||||
|
@ -288,6 +299,17 @@ public class RestHighLevelClient implements Closeable {
|
|||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
|
||||
*/
|
||||
public final void bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener<BulkResponse> listener) {
|
||||
performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* Asynchronously executes a bulk request using the Bulk API
|
||||
*
|
||||
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
|
||||
* @deprecated Prefer {@link #bulkAsync(BulkRequest, RequestOptions, ActionListener)}
|
||||
*/
|
||||
@Deprecated
|
||||
public final void bulkAsync(BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, listener, emptySet(), headers);
|
||||
}
|
||||
|
@ -584,6 +606,7 @@ public class RestHighLevelClient implements Closeable {
|
|||
FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser,
|
||||
|
@ -591,16 +614,34 @@ public class RestHighLevelClient implements Closeable {
|
|||
return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequestAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser,
|
||||
Set<Integer> ignores) throws IOException {
|
||||
return performRequest(request, requestConverter, options,
|
||||
response -> parseEntity(response.getEntity(), entityParser), ignores);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequest(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
Set<Integer> ignores, Header... headers) throws IOException {
|
||||
return performRequest(request, requestConverter, optionsForHeaders(headers), responseConverter, ignores);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> Resp performRequest(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
Set<Integer> ignores) throws IOException {
|
||||
ActionRequestValidationException validationException = request.validate();
|
||||
if (validationException != null) {
|
||||
throw validationException;
|
||||
}
|
||||
Request req = requestConverter.apply(request);
|
||||
addHeaders(req, headers);
|
||||
req.setOptions(options);
|
||||
Response response;
|
||||
try {
|
||||
response = client.performRequest(req);
|
||||
|
@ -626,6 +667,7 @@ public class RestHighLevelClient implements Closeable {
|
|||
}
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser,
|
||||
|
@ -634,10 +676,28 @@ public class RestHighLevelClient implements Closeable {
|
|||
listener, ignores, headers);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> void performRequestAsyncAndParseEntity(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
CheckedFunction<XContentParser, Resp, IOException> entityParser,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores) {
|
||||
performRequestAsync(request, requestConverter, options,
|
||||
response -> parseEntity(response.getEntity(), entityParser), listener, ignores);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final <Req extends ActionRequest, Resp> void performRequestAsync(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores, Header... headers) {
|
||||
performRequestAsync(request, requestConverter, optionsForHeaders(headers), responseConverter, listener, ignores);
|
||||
}
|
||||
|
||||
protected final <Req extends ActionRequest, Resp> void performRequestAsync(Req request,
|
||||
CheckedFunction<Req, Request, IOException> requestConverter,
|
||||
RequestOptions options,
|
||||
CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
ActionListener<Resp> listener, Set<Integer> ignores) {
|
||||
ActionRequestValidationException validationException = request.validate();
|
||||
if (validationException != null) {
|
||||
listener.onFailure(validationException);
|
||||
|
@ -650,19 +710,12 @@ public class RestHighLevelClient implements Closeable {
|
|||
listener.onFailure(e);
|
||||
return;
|
||||
}
|
||||
addHeaders(req, headers);
|
||||
req.setOptions(options);
|
||||
|
||||
ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores);
|
||||
client.performRequestAsync(req, responseListener);
|
||||
}
|
||||
|
||||
private static void addHeaders(Request request, Header... headers) {
|
||||
Objects.requireNonNull(headers, "headers cannot be null");
|
||||
for (Header header : headers) {
|
||||
request.addHeader(header.getName(), header.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
final <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
|
||||
ActionListener<Resp> actionListener, Set<Integer> ignores) {
|
||||
return new ResponseListener() {
|
||||
|
@ -746,6 +799,15 @@ public class RestHighLevelClient implements Closeable {
|
|||
}
|
||||
}
|
||||
|
||||
private static RequestOptions optionsForHeaders(Header[] headers) {
|
||||
RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
|
||||
for (Header header : headers) {
|
||||
Objects.requireNonNull(header, "header cannot be null");
|
||||
options.addHeader(header.getName(), header.getValue());
|
||||
}
|
||||
return options.build();
|
||||
}
|
||||
|
||||
static boolean convertExistsResponse(Response response) {
|
||||
return response.getStatusLine().getStatusCode() == 200;
|
||||
}
|
||||
|
|
|
@ -22,6 +22,8 @@ package org.elasticsearch.client;
|
|||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
|
||||
import org.elasticsearch.action.ingest.GetPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.GetPipelineResponse;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.PutPipelineResponse;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
|
@ -32,7 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.ingest.Pipeline;
|
||||
import org.elasticsearch.ingest.PipelineConfiguration;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -113,31 +115,7 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
|
|||
|
||||
public void testPutPipeline() throws IOException {
|
||||
String id = "some_pipeline_id";
|
||||
XContentType xContentType = randomFrom(XContentType.values());
|
||||
XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent());
|
||||
pipelineBuilder.startObject();
|
||||
{
|
||||
pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors");
|
||||
pipelineBuilder.startArray(Pipeline.PROCESSORS_KEY);
|
||||
{
|
||||
pipelineBuilder.startObject().startObject("set");
|
||||
{
|
||||
pipelineBuilder
|
||||
.field("field", "foo")
|
||||
.field("value", "bar");
|
||||
}
|
||||
pipelineBuilder.endObject().endObject();
|
||||
pipelineBuilder.startObject().startObject("convert");
|
||||
{
|
||||
pipelineBuilder
|
||||
.field("field", "rank")
|
||||
.field("type", "integer");
|
||||
}
|
||||
pipelineBuilder.endObject().endObject();
|
||||
}
|
||||
pipelineBuilder.endArray();
|
||||
}
|
||||
pipelineBuilder.endObject();
|
||||
XContentBuilder pipelineBuilder = buildRandomXContentPipeline();
|
||||
PutPipelineRequest request = new PutPipelineRequest(
|
||||
id,
|
||||
BytesReference.bytes(pipelineBuilder),
|
||||
|
@ -147,4 +125,27 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
|
|||
execute(request, highLevelClient().cluster()::putPipeline, highLevelClient().cluster()::putPipelineAsync);
|
||||
assertTrue(putPipelineResponse.isAcknowledged());
|
||||
}
|
||||
|
||||
public void testGetPipeline() throws IOException {
|
||||
String id = "some_pipeline_id";
|
||||
XContentBuilder pipelineBuilder = buildRandomXContentPipeline();
|
||||
{
|
||||
PutPipelineRequest request = new PutPipelineRequest(
|
||||
id,
|
||||
BytesReference.bytes(pipelineBuilder),
|
||||
pipelineBuilder.contentType()
|
||||
);
|
||||
createPipeline(request);
|
||||
}
|
||||
|
||||
GetPipelineRequest request = new GetPipelineRequest(id);
|
||||
|
||||
GetPipelineResponse response =
|
||||
execute(request, highLevelClient().cluster()::getPipeline, highLevelClient().cluster()::getPipelineAsync);
|
||||
assertTrue(response.isFound());
|
||||
assertEquals(response.pipelines().get(0).getId(), id);
|
||||
PipelineConfiguration expectedConfig =
|
||||
new PipelineConfiguration(id, BytesReference.bytes(pipelineBuilder), pipelineBuilder.contentType());
|
||||
assertEquals(expectedConfig.getConfigAsMap(), response.pipelines().get(0).getConfigAsMap());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,7 +26,6 @@ import org.apache.http.RequestLine;
|
|||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.entity.ByteArrayEntity;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.message.BasicHeader;
|
||||
import org.apache.http.message.BasicRequestLine;
|
||||
import org.apache.http.message.BasicStatusLine;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
@ -48,11 +47,13 @@ import java.lang.reflect.Method;
|
|||
import java.lang.reflect.Modifier;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.emptySet;
|
||||
import static org.hamcrest.Matchers.containsInAnyOrder;
|
||||
import static org.hamcrest.Matchers.contains;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Mockito.doAnswer;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
@ -73,12 +74,12 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
|
|||
final RestClient restClient = mock(RestClient.class);
|
||||
restHighLevelClient = new CustomRestClient(restClient);
|
||||
|
||||
doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders().iterator().next()))
|
||||
doAnswer(inv -> mockPerformRequest((Request) inv.getArguments()[0]))
|
||||
.when(restClient)
|
||||
.performRequest(any(Request.class));
|
||||
|
||||
doAnswer(inv -> mockPerformRequestAsync(
|
||||
((Request) inv.getArguments()[0]).getHeaders().iterator().next(),
|
||||
((Request) inv.getArguments()[0]),
|
||||
(ResponseListener) inv.getArguments()[1]))
|
||||
.when(restClient)
|
||||
.performRequestAsync(any(Request.class), any(ResponseListener.class));
|
||||
|
@ -87,26 +88,32 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
|
|||
|
||||
public void testCustomEndpoint() throws IOException {
|
||||
final MainRequest request = new MainRequest();
|
||||
final Header header = new BasicHeader("node_name", randomAlphaOfLengthBetween(1, 10));
|
||||
String nodeName = randomAlphaOfLengthBetween(1, 10);
|
||||
|
||||
MainResponse response = restHighLevelClient.custom(request, header);
|
||||
assertEquals(header.getValue(), response.getNodeName());
|
||||
MainResponse response = restHighLevelClient.custom(request, optionsForNodeName(nodeName));
|
||||
assertEquals(nodeName, response.getNodeName());
|
||||
|
||||
response = restHighLevelClient.customAndParse(request, header);
|
||||
assertEquals(header.getValue(), response.getNodeName());
|
||||
response = restHighLevelClient.customAndParse(request, optionsForNodeName(nodeName));
|
||||
assertEquals(nodeName, response.getNodeName());
|
||||
}
|
||||
|
||||
public void testCustomEndpointAsync() throws Exception {
|
||||
final MainRequest request = new MainRequest();
|
||||
final Header header = new BasicHeader("node_name", randomAlphaOfLengthBetween(1, 10));
|
||||
String nodeName = randomAlphaOfLengthBetween(1, 10);
|
||||
|
||||
PlainActionFuture<MainResponse> future = PlainActionFuture.newFuture();
|
||||
restHighLevelClient.customAsync(request, future, header);
|
||||
assertEquals(header.getValue(), future.get().getNodeName());
|
||||
restHighLevelClient.customAsync(request, optionsForNodeName(nodeName), future);
|
||||
assertEquals(nodeName, future.get().getNodeName());
|
||||
|
||||
future = PlainActionFuture.newFuture();
|
||||
restHighLevelClient.customAndParseAsync(request, future, header);
|
||||
assertEquals(header.getValue(), future.get().getNodeName());
|
||||
restHighLevelClient.customAndParseAsync(request, optionsForNodeName(nodeName), future);
|
||||
assertEquals(nodeName, future.get().getNodeName());
|
||||
}
|
||||
|
||||
private static RequestOptions optionsForNodeName(String nodeName) {
|
||||
RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
|
||||
options.addHeader("node_name", nodeName);
|
||||
return options.build();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -115,27 +122,27 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
|
|||
*/
|
||||
@SuppressForbidden(reason = "We're forced to uses Class#getDeclaredMethods() here because this test checks protected methods")
|
||||
public void testMethodsVisibility() throws ClassNotFoundException {
|
||||
final String[] methodNames = new String[]{"performRequest",
|
||||
"performRequestAsync",
|
||||
final String[] methodNames = new String[]{"parseEntity",
|
||||
"parseResponseException",
|
||||
"performRequest",
|
||||
"performRequestAndParseEntity",
|
||||
"performRequestAsyncAndParseEntity",
|
||||
"parseEntity",
|
||||
"parseResponseException"};
|
||||
"performRequestAsync",
|
||||
"performRequestAsyncAndParseEntity"};
|
||||
|
||||
final List<String> protectedMethods = Arrays.stream(RestHighLevelClient.class.getDeclaredMethods())
|
||||
final Set<String> protectedMethods = Arrays.stream(RestHighLevelClient.class.getDeclaredMethods())
|
||||
.filter(method -> Modifier.isProtected(method.getModifiers()))
|
||||
.map(Method::getName)
|
||||
.collect(Collectors.toList());
|
||||
.collect(Collectors.toCollection(TreeSet::new));
|
||||
|
||||
assertThat(protectedMethods, containsInAnyOrder(methodNames));
|
||||
assertThat(protectedMethods, contains(methodNames));
|
||||
}
|
||||
|
||||
/**
|
||||
* Mocks the asynchronous request execution by calling the {@link #mockPerformRequest(Header)} method.
|
||||
* Mocks the asynchronous request execution by calling the {@link #mockPerformRequest(Request)} method.
|
||||
*/
|
||||
private Void mockPerformRequestAsync(Header httpHeader, ResponseListener responseListener) {
|
||||
private Void mockPerformRequestAsync(Request request, ResponseListener responseListener) {
|
||||
try {
|
||||
responseListener.onSuccess(mockPerformRequest(httpHeader));
|
||||
responseListener.onSuccess(mockPerformRequest(request));
|
||||
} catch (IOException e) {
|
||||
responseListener.onFailure(e);
|
||||
}
|
||||
|
@ -145,7 +152,9 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
|
|||
/**
|
||||
* Mocks the synchronous request execution like if it was executed by Elasticsearch.
|
||||
*/
|
||||
private Response mockPerformRequest(Header httpHeader) throws IOException {
|
||||
private Response mockPerformRequest(Request request) throws IOException {
|
||||
assertThat(request.getOptions().getHeaders(), hasSize(1));
|
||||
Header httpHeader = request.getOptions().getHeaders().get(0);
|
||||
final Response mockResponse = mock(Response.class);
|
||||
when(mockResponse.getHost()).thenReturn(new HttpHost("localhost", 9200));
|
||||
|
||||
|
@ -171,20 +180,20 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
|
|||
super(restClient, RestClient::close, Collections.emptyList());
|
||||
}
|
||||
|
||||
MainResponse custom(MainRequest mainRequest, Header... headers) throws IOException {
|
||||
return performRequest(mainRequest, this::toRequest, this::toResponse, emptySet(), headers);
|
||||
MainResponse custom(MainRequest mainRequest, RequestOptions options) throws IOException {
|
||||
return performRequest(mainRequest, this::toRequest, options, this::toResponse, emptySet());
|
||||
}
|
||||
|
||||
MainResponse customAndParse(MainRequest mainRequest, Header... headers) throws IOException {
|
||||
return performRequestAndParseEntity(mainRequest, this::toRequest, MainResponse::fromXContent, emptySet(), headers);
|
||||
MainResponse customAndParse(MainRequest mainRequest, RequestOptions options) throws IOException {
|
||||
return performRequestAndParseEntity(mainRequest, this::toRequest, options, MainResponse::fromXContent, emptySet());
|
||||
}
|
||||
|
||||
void customAsync(MainRequest mainRequest, ActionListener<MainResponse> listener, Header... headers) {
|
||||
performRequestAsync(mainRequest, this::toRequest, this::toResponse, listener, emptySet(), headers);
|
||||
void customAsync(MainRequest mainRequest, RequestOptions options, ActionListener<MainResponse> listener) {
|
||||
performRequestAsync(mainRequest, this::toRequest, options, this::toResponse, listener, emptySet());
|
||||
}
|
||||
|
||||
void customAndParseAsync(MainRequest mainRequest, ActionListener<MainResponse> listener, Header... headers) {
|
||||
performRequestAsyncAndParseEntity(mainRequest, this::toRequest, MainResponse::fromXContent, listener, emptySet(), headers);
|
||||
void customAndParseAsync(MainRequest mainRequest, RequestOptions options, ActionListener<MainResponse> listener) {
|
||||
performRequestAsyncAndParseEntity(mainRequest, this::toRequest, options, MainResponse::fromXContent, listener, emptySet());
|
||||
}
|
||||
|
||||
Request toRequest(MainRequest mainRequest) throws IOException {
|
||||
|
|
|
@ -21,7 +21,12 @@ package org.elasticsearch.client;
|
|||
|
||||
import org.apache.http.Header;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.ingest.Pipeline;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
|
@ -80,4 +85,42 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
|
|||
super(restClient, (client) -> {}, Collections.emptyList());
|
||||
}
|
||||
}
|
||||
|
||||
protected static XContentBuilder buildRandomXContentPipeline() throws IOException {
|
||||
XContentType xContentType = randomFrom(XContentType.values());
|
||||
XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent());
|
||||
pipelineBuilder.startObject();
|
||||
{
|
||||
pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors");
|
||||
pipelineBuilder.startArray(Pipeline.PROCESSORS_KEY);
|
||||
{
|
||||
pipelineBuilder.startObject().startObject("set");
|
||||
{
|
||||
pipelineBuilder
|
||||
.field("field", "foo")
|
||||
.field("value", "bar");
|
||||
}
|
||||
pipelineBuilder.endObject().endObject();
|
||||
pipelineBuilder.startObject().startObject("convert");
|
||||
{
|
||||
pipelineBuilder
|
||||
.field("field", "rank")
|
||||
.field("type", "integer");
|
||||
}
|
||||
pipelineBuilder.endObject().endObject();
|
||||
}
|
||||
pipelineBuilder.endArray();
|
||||
}
|
||||
pipelineBuilder.endObject();
|
||||
return pipelineBuilder;
|
||||
}
|
||||
|
||||
protected static void createPipeline(String pipelineId) throws IOException {
|
||||
XContentBuilder builder = buildRandomXContentPipeline();
|
||||
createPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(builder), builder.contentType()));
|
||||
}
|
||||
|
||||
protected static void createPipeline(PutPipelineRequest putPipelineRequest) throws IOException {
|
||||
assertOK(client().performRequest(RequestConverters.putPipeline(putPipelineRequest)));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -63,6 +63,7 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
|
|||
import org.elasticsearch.action.get.GetRequest;
|
||||
import org.elasticsearch.action.get.MultiGetRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.ingest.GetPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
import org.elasticsearch.action.search.ClearScrollRequest;
|
||||
import org.elasticsearch.action.search.MultiSearchRequest;
|
||||
|
@ -1450,6 +1451,20 @@ public class RequestConvertersTests extends ESTestCase {
|
|||
assertEquals(expectedParams, expectedRequest.getParameters());
|
||||
}
|
||||
|
||||
public void testGetPipeline() {
|
||||
String pipelineId = "some_pipeline_id";
|
||||
Map<String, String> expectedParams = new HashMap<>();
|
||||
GetPipelineRequest request = new GetPipelineRequest("some_pipeline_id");
|
||||
setRandomMasterTimeout(request, expectedParams);
|
||||
Request expectedRequest = RequestConverters.getPipeline(request);
|
||||
StringJoiner endpoint = new StringJoiner("/", "/", "");
|
||||
endpoint.add("_ingest/pipeline");
|
||||
endpoint.add(pipelineId);
|
||||
assertEquals(endpoint.toString(), expectedRequest.getEndpoint());
|
||||
assertEquals(HttpGet.METHOD_NAME, expectedRequest.getMethod());
|
||||
assertEquals(expectedParams, expectedRequest.getParameters());
|
||||
}
|
||||
|
||||
public void testRollover() throws IOException {
|
||||
RolloverRequest rolloverRequest = new RolloverRequest(randomAlphaOfLengthBetween(3, 10),
|
||||
randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10));
|
||||
|
|
|
@ -23,6 +23,8 @@ import org.elasticsearch.action.ActionListener;
|
|||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
|
||||
import org.elasticsearch.action.ingest.GetPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.GetPipelineResponse;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.PutPipelineResponse;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
|
@ -34,11 +36,13 @@ import org.elasticsearch.common.unit.ByteSizeUnit;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.ingest.PipelineConfiguration;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
|
@ -257,4 +261,74 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
|
|||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetPipeline() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
createPipeline("my-pipeline-id");
|
||||
}
|
||||
|
||||
{
|
||||
// tag::get-pipeline-request
|
||||
GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id"); // <1>
|
||||
// end::get-pipeline-request
|
||||
|
||||
// tag::get-pipeline-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::get-pipeline-request-masterTimeout
|
||||
|
||||
// tag::get-pipeline-execute
|
||||
GetPipelineResponse response = client.cluster().getPipeline(request); // <1>
|
||||
// end::get-pipeline-execute
|
||||
|
||||
// tag::get-pipeline-response
|
||||
boolean successful = response.isFound(); // <1>
|
||||
List<PipelineConfiguration> pipelines = response.pipelines(); // <2>
|
||||
for(PipelineConfiguration pipeline: pipelines) {
|
||||
Map<String, Object> config = pipeline.getConfigAsMap(); // <3>
|
||||
}
|
||||
// end::get-pipeline-response
|
||||
|
||||
assertTrue(successful);
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetPipelineAsync() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
createPipeline("my-pipeline-id");
|
||||
}
|
||||
|
||||
{
|
||||
GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id");
|
||||
|
||||
// tag::get-pipeline-execute-listener
|
||||
ActionListener<GetPipelineResponse> listener =
|
||||
new ActionListener<GetPipelineResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetPipelineResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::get-pipeline-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::get-pipeline-execute-async
|
||||
client.cluster().getPipelineAsync(request, listener); // <1>
|
||||
// end::get-pipeline-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,17 +19,11 @@
|
|||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.message.BasicHeader;
|
||||
import org.apache.http.nio.entity.NStringEntity;
|
||||
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
|
@ -42,11 +36,9 @@ public final class Request {
|
|||
private final String method;
|
||||
private final String endpoint;
|
||||
private final Map<String, String> parameters = new HashMap<>();
|
||||
private final List<Header> headers = new ArrayList<>();
|
||||
|
||||
private HttpEntity entity;
|
||||
private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory =
|
||||
HttpAsyncResponseConsumerFactory.DEFAULT;
|
||||
private RequestOptions options = RequestOptions.DEFAULT;
|
||||
|
||||
/**
|
||||
* Create the {@linkplain Request}.
|
||||
|
@ -127,40 +119,29 @@ public final class Request {
|
|||
}
|
||||
|
||||
/**
|
||||
* Add the provided header to the request.
|
||||
* Set the portion of an HTTP request to Elasticsearch that can be
|
||||
* manipulated without changing Elasticsearch's behavior.
|
||||
*/
|
||||
public void addHeader(String name, String value) {
|
||||
Objects.requireNonNull(name, "header name cannot be null");
|
||||
Objects.requireNonNull(value, "header value cannot be null");
|
||||
this.headers.add(new ReqHeader(name, value));
|
||||
public void setOptions(RequestOptions options) {
|
||||
Objects.requireNonNull(options, "options cannot be null");
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
/**
|
||||
* Headers to attach to the request.
|
||||
* Set the portion of an HTTP request to Elasticsearch that can be
|
||||
* manipulated without changing Elasticsearch's behavior.
|
||||
*/
|
||||
List<Header> getHeaders() {
|
||||
return Collections.unmodifiableList(headers);
|
||||
public void setOptions(RequestOptions.Builder options) {
|
||||
Objects.requireNonNull(options, "options cannot be null");
|
||||
this.options = options.build();
|
||||
}
|
||||
|
||||
/**
|
||||
* set the {@link HttpAsyncResponseConsumerFactory} used to create one
|
||||
* {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
|
||||
* response body gets streamed from a non-blocking HTTP connection on the
|
||||
* client side.
|
||||
* Get the portion of an HTTP request to Elasticsearch that can be
|
||||
* manipulated without changing Elasticsearch's behavior.
|
||||
*/
|
||||
public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
|
||||
this.httpAsyncResponseConsumerFactory =
|
||||
Objects.requireNonNull(httpAsyncResponseConsumerFactory, "httpAsyncResponseConsumerFactory cannot be null");
|
||||
}
|
||||
|
||||
/**
|
||||
* The {@link HttpAsyncResponseConsumerFactory} used to create one
|
||||
* {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
|
||||
* response body gets streamed from a non-blocking HTTP connection on the
|
||||
* client side.
|
||||
*/
|
||||
public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() {
|
||||
return httpAsyncResponseConsumerFactory;
|
||||
public RequestOptions getOptions() {
|
||||
return options;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -175,18 +156,7 @@ public final class Request {
|
|||
if (entity != null) {
|
||||
b.append(", entity=").append(entity);
|
||||
}
|
||||
if (headers.size() > 0) {
|
||||
b.append(", headers=");
|
||||
for (int h = 0; h < headers.size(); h++) {
|
||||
if (h != 0) {
|
||||
b.append(',');
|
||||
}
|
||||
b.append(headers.get(h).toString());
|
||||
}
|
||||
}
|
||||
if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
|
||||
b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory);
|
||||
}
|
||||
b.append(", options=").append(options);
|
||||
return b.append('}').toString();
|
||||
}
|
||||
|
||||
|
@ -204,40 +174,11 @@ public final class Request {
|
|||
&& endpoint.equals(other.endpoint)
|
||||
&& parameters.equals(other.parameters)
|
||||
&& Objects.equals(entity, other.entity)
|
||||
&& headers.equals(other.headers)
|
||||
&& httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory);
|
||||
&& options.equals(other.options);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(method, endpoint, parameters, entity, headers.hashCode(), httpAsyncResponseConsumerFactory);
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom implementation of {@link BasicHeader} that overrides equals and hashCode.
|
||||
*/
|
||||
static final class ReqHeader extends BasicHeader {
|
||||
|
||||
ReqHeader(String name, String value) {
|
||||
super(name, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (this == other) {
|
||||
return true;
|
||||
}
|
||||
if (other instanceof ReqHeader) {
|
||||
Header otherHeader = (Header) other;
|
||||
return Objects.equals(getName(), otherHeader.getName()) &&
|
||||
Objects.equals(getValue(), otherHeader.getValue());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(getName(), getValue());
|
||||
}
|
||||
return Objects.hash(method, endpoint, parameters, entity, options);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,175 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.message.BasicHeader;
|
||||
import org.apache.http.Header;
|
||||
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
|
||||
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
/**
|
||||
* The portion of an HTTP request to Elasticsearch that can be
|
||||
* manipulated without changing Elasticsearch's behavior.
|
||||
*/
|
||||
public final class RequestOptions {
|
||||
public static final RequestOptions DEFAULT = new Builder(
|
||||
Collections.<Header>emptyList(), HeapBufferedResponseConsumerFactory.DEFAULT).build();
|
||||
|
||||
private final List<Header> headers;
|
||||
private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory;
|
||||
|
||||
private RequestOptions(Builder builder) {
|
||||
this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers));
|
||||
this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory;
|
||||
}
|
||||
|
||||
public Builder toBuilder() {
|
||||
Builder builder = new Builder(headers, httpAsyncResponseConsumerFactory);
|
||||
return builder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Headers to attach to the request.
|
||||
*/
|
||||
public List<Header> getHeaders() {
|
||||
return headers;
|
||||
}
|
||||
|
||||
/**
|
||||
* The {@link HttpAsyncResponseConsumerFactory} used to create one
|
||||
* {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
|
||||
* response body gets streamed from a non-blocking HTTP connection on the
|
||||
* client side.
|
||||
*/
|
||||
public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() {
|
||||
return httpAsyncResponseConsumerFactory;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder b = new StringBuilder();
|
||||
b.append("RequestOptions{");
|
||||
if (headers.size() > 0) {
|
||||
b.append(", headers=");
|
||||
for (int h = 0; h < headers.size(); h++) {
|
||||
if (h != 0) {
|
||||
b.append(',');
|
||||
}
|
||||
b.append(headers.get(h).toString());
|
||||
}
|
||||
}
|
||||
if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
|
||||
b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory);
|
||||
}
|
||||
return b.append('}').toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null || (obj.getClass() != getClass())) {
|
||||
return false;
|
||||
}
|
||||
if (obj == this) {
|
||||
return true;
|
||||
}
|
||||
|
||||
RequestOptions other = (RequestOptions) obj;
|
||||
return headers.equals(other.headers)
|
||||
&& httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(headers, httpAsyncResponseConsumerFactory);
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
private final List<Header> headers;
|
||||
private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory;
|
||||
|
||||
private Builder(List<Header> headers, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
|
||||
this.headers = new ArrayList<>(headers);
|
||||
this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory;
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the {@linkplain RequestOptions}.
|
||||
*/
|
||||
public RequestOptions build() {
|
||||
return new RequestOptions(this);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add the provided header to the request.
|
||||
*/
|
||||
public void addHeader(String name, String value) {
|
||||
Objects.requireNonNull(name, "header name cannot be null");
|
||||
Objects.requireNonNull(value, "header value cannot be null");
|
||||
this.headers.add(new ReqHeader(name, value));
|
||||
}
|
||||
|
||||
/**
|
||||
* set the {@link HttpAsyncResponseConsumerFactory} used to create one
|
||||
* {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
|
||||
* response body gets streamed from a non-blocking HTTP connection on the
|
||||
* client side.
|
||||
*/
|
||||
public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
|
||||
this.httpAsyncResponseConsumerFactory =
|
||||
Objects.requireNonNull(httpAsyncResponseConsumerFactory, "httpAsyncResponseConsumerFactory cannot be null");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom implementation of {@link BasicHeader} that overrides equals and
|
||||
* hashCode so it is easier to test equality of {@link RequestOptions}.
|
||||
*/
|
||||
static final class ReqHeader extends BasicHeader {
|
||||
|
||||
ReqHeader(String name, String value) {
|
||||
super(name, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (this == other) {
|
||||
return true;
|
||||
}
|
||||
if (other instanceof ReqHeader) {
|
||||
Header otherHeader = (Header) other;
|
||||
return Objects.equals(getName(), otherHeader.getName()) &&
|
||||
Objects.equals(getValue(), otherHeader.getValue());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(getName(), getValue());
|
||||
}
|
||||
}
|
||||
}
|
|
@ -61,6 +61,7 @@ import java.util.Comparator;
|
|||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
@ -132,7 +133,7 @@ public class RestClient implements Closeable {
|
|||
if (hosts == null || hosts.length == 0) {
|
||||
throw new IllegalArgumentException("hosts must not be null nor empty");
|
||||
}
|
||||
Set<HttpHost> httpHosts = new HashSet<>();
|
||||
Set<HttpHost> httpHosts = new LinkedHashSet<>();
|
||||
AuthCache authCache = new BasicAuthCache();
|
||||
for (HttpHost host : hosts) {
|
||||
Objects.requireNonNull(host, "host cannot be null");
|
||||
|
@ -143,6 +144,13 @@ public class RestClient implements Closeable {
|
|||
this.blacklist.clear();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the configured hosts
|
||||
*/
|
||||
public List<HttpHost> getHosts() {
|
||||
return new ArrayList<>(hostTuple.hosts);
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends a request to the Elasticsearch cluster that the client points to.
|
||||
* Blocks until the request is completed and returns its response or fails
|
||||
|
@ -304,8 +312,7 @@ public class RestClient implements Closeable {
|
|||
Request request = new Request(method, endpoint);
|
||||
addParameters(request, params);
|
||||
request.setEntity(entity);
|
||||
request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
|
||||
addHeaders(request, headers);
|
||||
setOptions(request, httpAsyncResponseConsumerFactory, headers);
|
||||
return performRequest(request);
|
||||
}
|
||||
|
||||
|
@ -419,8 +426,7 @@ public class RestClient implements Closeable {
|
|||
request = new Request(method, endpoint);
|
||||
addParameters(request, params);
|
||||
request.setEntity(entity);
|
||||
request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
|
||||
addHeaders(request, headers);
|
||||
setOptions(request, httpAsyncResponseConsumerFactory, headers);
|
||||
} catch (Exception e) {
|
||||
responseListener.onFailure(e);
|
||||
return;
|
||||
|
@ -457,11 +463,11 @@ public class RestClient implements Closeable {
|
|||
}
|
||||
URI uri = buildUri(pathPrefix, request.getEndpoint(), requestParams);
|
||||
HttpRequestBase httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity());
|
||||
setHeaders(httpRequest, request.getHeaders());
|
||||
setHeaders(httpRequest, request.getOptions().getHeaders());
|
||||
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener);
|
||||
long startTime = System.nanoTime();
|
||||
performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes,
|
||||
request.getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener);
|
||||
request.getOptions().getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener);
|
||||
}
|
||||
|
||||
private void performRequestAsync(final long startTime, final HostTuple<Iterator<HttpHost>> hostTuple, final HttpRequestBase request,
|
||||
|
@ -883,11 +889,24 @@ public class RestClient implements Closeable {
|
|||
*/
|
||||
@Deprecated
|
||||
private static void addHeaders(Request request, Header... headers) {
|
||||
setOptions(request, RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory(), headers);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add all headers from the provided varargs argument to a {@link Request}. This only exists
|
||||
* to support methods that exist for backwards compatibility.
|
||||
*/
|
||||
@Deprecated
|
||||
private static void setOptions(Request request, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
|
||||
Header... headers) {
|
||||
Objects.requireNonNull(headers, "headers cannot be null");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
for (Header header : headers) {
|
||||
Objects.requireNonNull(header, "header cannot be null");
|
||||
request.addHeader(header.getName(), header.getValue());
|
||||
options.addHeader(header.getName(), header.getValue());
|
||||
}
|
||||
options.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
|
||||
request.setOptions(options);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -0,0 +1,142 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.Header;
|
||||
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
public class RequestOptionsTests extends RestClientTestCase {
|
||||
public void testDefault() {
|
||||
assertEquals(Collections.<Header>emptyList(), RequestOptions.DEFAULT.getHeaders());
|
||||
assertEquals(HttpAsyncResponseConsumerFactory.DEFAULT, RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory());
|
||||
assertEquals(RequestOptions.DEFAULT, RequestOptions.DEFAULT.toBuilder().build());
|
||||
}
|
||||
|
||||
public void testAddHeader() {
|
||||
try {
|
||||
randomBuilder().addHeader(null, randomAsciiLettersOfLengthBetween(3, 10));
|
||||
fail("expected failure");
|
||||
} catch (NullPointerException e) {
|
||||
assertEquals("header name cannot be null", e.getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
randomBuilder().addHeader(randomAsciiLettersOfLengthBetween(3, 10), null);
|
||||
fail("expected failure");
|
||||
} catch (NullPointerException e) {
|
||||
assertEquals("header value cannot be null", e.getMessage());
|
||||
}
|
||||
|
||||
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
|
||||
int numHeaders = between(0, 5);
|
||||
List<Header> headers = new ArrayList<>();
|
||||
for (int i = 0; i < numHeaders; i++) {
|
||||
Header header = new RequestOptions.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3));
|
||||
headers.add(header);
|
||||
builder.addHeader(header.getName(), header.getValue());
|
||||
}
|
||||
RequestOptions options = builder.build();
|
||||
assertEquals(headers, options.getHeaders());
|
||||
|
||||
try {
|
||||
options.getHeaders().add(
|
||||
new RequestOptions.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3)));
|
||||
fail("expected failure");
|
||||
} catch (UnsupportedOperationException e) {
|
||||
assertNull(e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void testSetHttpAsyncResponseConsumerFactory() {
|
||||
try {
|
||||
RequestOptions.DEFAULT.toBuilder().setHttpAsyncResponseConsumerFactory(null);
|
||||
fail("expected failure");
|
||||
} catch (NullPointerException e) {
|
||||
assertEquals("httpAsyncResponseConsumerFactory cannot be null", e.getMessage());
|
||||
}
|
||||
|
||||
HttpAsyncResponseConsumerFactory factory = mock(HttpAsyncResponseConsumerFactory.class);
|
||||
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
|
||||
builder.setHttpAsyncResponseConsumerFactory(factory);
|
||||
RequestOptions options = builder.build();
|
||||
assertSame(factory, options.getHttpAsyncResponseConsumerFactory());
|
||||
}
|
||||
|
||||
public void testEqualsAndHashCode() {
|
||||
RequestOptions request = randomBuilder().build();
|
||||
assertEquals(request, request);
|
||||
|
||||
RequestOptions copy = copy(request);
|
||||
assertEquals(request, copy);
|
||||
assertEquals(copy, request);
|
||||
assertEquals(request.hashCode(), copy.hashCode());
|
||||
|
||||
RequestOptions mutant = mutate(request);
|
||||
assertNotEquals(request, mutant);
|
||||
assertNotEquals(mutant, request);
|
||||
}
|
||||
|
||||
static RequestOptions.Builder randomBuilder() {
|
||||
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
|
||||
|
||||
if (randomBoolean()) {
|
||||
int headerCount = between(1, 5);
|
||||
for (int i = 0; i < headerCount; i++) {
|
||||
builder.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3));
|
||||
}
|
||||
}
|
||||
|
||||
if (randomBoolean()) {
|
||||
builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1));
|
||||
}
|
||||
|
||||
return builder;
|
||||
}
|
||||
|
||||
private static RequestOptions copy(RequestOptions options) {
|
||||
return options.toBuilder().build();
|
||||
}
|
||||
|
||||
private static RequestOptions mutate(RequestOptions options) {
|
||||
RequestOptions.Builder mutant = options.toBuilder();
|
||||
int mutationType = between(0, 1);
|
||||
switch (mutationType) {
|
||||
case 0:
|
||||
mutant.addHeader("extra", "m");
|
||||
return mutant.build();
|
||||
case 1:
|
||||
mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5));
|
||||
return mutant.build();
|
||||
default:
|
||||
throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]");
|
||||
}
|
||||
}
|
||||
}
|
|
@ -37,6 +37,7 @@ import java.util.Map;
|
|||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
public class RequestTests extends RestClientTestCase {
|
||||
|
@ -127,33 +128,33 @@ public class RequestTests extends RestClientTestCase {
|
|||
assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset()));
|
||||
}
|
||||
|
||||
public void testAddHeader() {
|
||||
public void testSetOptions() {
|
||||
final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
|
||||
final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
|
||||
Request request = new Request(method, endpoint);
|
||||
|
||||
try {
|
||||
request.addHeader(null, randomAsciiLettersOfLengthBetween(3, 10));
|
||||
request.setOptions((RequestOptions) null);
|
||||
fail("expected failure");
|
||||
} catch (NullPointerException e) {
|
||||
assertEquals("header name cannot be null", e.getMessage());
|
||||
assertEquals("options cannot be null", e.getMessage());
|
||||
}
|
||||
|
||||
try {
|
||||
request.addHeader(randomAsciiLettersOfLengthBetween(3, 10), null);
|
||||
request.setOptions((RequestOptions.Builder) null);
|
||||
fail("expected failure");
|
||||
} catch (NullPointerException e) {
|
||||
assertEquals("header value cannot be null", e.getMessage());
|
||||
assertEquals("options cannot be null", e.getMessage());
|
||||
}
|
||||
|
||||
int numHeaders = between(0, 5);
|
||||
List<Header> headers = new ArrayList<>();
|
||||
for (int i = 0; i < numHeaders; i++) {
|
||||
Header header = new Request.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3));
|
||||
headers.add(header);
|
||||
request.addHeader(header.getName(), header.getValue());
|
||||
}
|
||||
assertEquals(headers, new ArrayList<>(request.getHeaders()));
|
||||
RequestOptions.Builder builder = RequestOptionsTests.randomBuilder();
|
||||
request.setOptions(builder);
|
||||
assertEquals(builder.build(), request.getOptions());
|
||||
|
||||
builder = RequestOptionsTests.randomBuilder();
|
||||
RequestOptions options = builder.build();
|
||||
request.setOptions(options);
|
||||
assertSame(options, request.getOptions());
|
||||
}
|
||||
|
||||
public void testEqualsAndHashCode() {
|
||||
|
@ -193,14 +194,9 @@ public class RequestTests extends RestClientTestCase {
|
|||
}
|
||||
|
||||
if (randomBoolean()) {
|
||||
int headerCount = between(1, 5);
|
||||
for (int i = 0; i < headerCount; i++) {
|
||||
request.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3));
|
||||
}
|
||||
}
|
||||
|
||||
if (randomBoolean()) {
|
||||
request.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1));
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1));
|
||||
request.setOptions(options);
|
||||
}
|
||||
|
||||
return request;
|
||||
|
@ -222,7 +218,7 @@ public class RequestTests extends RestClientTestCase {
|
|||
return mutant;
|
||||
}
|
||||
Request mutant = copy(request);
|
||||
int mutationType = between(0, 3);
|
||||
int mutationType = between(0, 2);
|
||||
switch (mutationType) {
|
||||
case 0:
|
||||
mutant.addParameter(randomAsciiAlphanumOfLength(mutant.getParameters().size() + 4), "extra");
|
||||
|
@ -231,10 +227,9 @@ public class RequestTests extends RestClientTestCase {
|
|||
mutant.setJsonEntity("mutant"); // randomRequest can't produce this value
|
||||
return mutant;
|
||||
case 2:
|
||||
mutant.addHeader("extra", "m");
|
||||
return mutant;
|
||||
case 3:
|
||||
mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5));
|
||||
RequestOptions.Builder options = mutant.getOptions().toBuilder();
|
||||
options.addHeader("extra", "m");
|
||||
mutant.setOptions(options);
|
||||
return mutant;
|
||||
default:
|
||||
throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]");
|
||||
|
@ -246,9 +241,6 @@ public class RequestTests extends RestClientTestCase {
|
|||
to.addParameter(param.getKey(), param.getValue());
|
||||
}
|
||||
to.setEntity(from.getEntity());
|
||||
for (Header header : from.getHeaders()) {
|
||||
to.addHeader(header.getName(), header.getValue());
|
||||
}
|
||||
to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory());
|
||||
to.setOptions(from.getOptions());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -378,9 +378,11 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
|
|||
String requestBody = "{ \"field\": \"value\" }";
|
||||
Request request = new Request(method, "/" + statusCode);
|
||||
request.setJsonEntity(requestBody);
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
for (Header header : headers) {
|
||||
request.addHeader(header.getName(), header.getValue());
|
||||
options.addHeader(header.getName(), header.getValue());
|
||||
}
|
||||
request.setOptions(options);
|
||||
Response esResponse;
|
||||
try {
|
||||
esResponse = restClient.performRequest(request);
|
||||
|
|
|
@ -362,9 +362,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
|||
final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
|
||||
final int statusCode = randomStatusCode(getRandom());
|
||||
Request request = new Request(method, "/" + statusCode);
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
for (Header requestHeader : requestHeaders) {
|
||||
request.addHeader(requestHeader.getName(), requestHeader.getValue());
|
||||
options.addHeader(requestHeader.getName(), requestHeader.getValue());
|
||||
}
|
||||
request.setOptions(options);
|
||||
Response esResponse;
|
||||
try {
|
||||
esResponse = restClient.performRequest(request);
|
||||
|
@ -438,11 +440,13 @@ public class RestClientSingleHostTests extends RestClientTestCase {
|
|||
final Set<String> uniqueNames = new HashSet<>();
|
||||
if (randomBoolean()) {
|
||||
Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
for (Header header : headers) {
|
||||
request.addHeader(header.getName(), header.getValue());
|
||||
expectedRequest.addHeader(new Request.ReqHeader(header.getName(), header.getValue()));
|
||||
options.addHeader(header.getName(), header.getValue());
|
||||
expectedRequest.addHeader(new RequestOptions.ReqHeader(header.getName(), header.getValue()));
|
||||
uniqueNames.add(header.getName());
|
||||
}
|
||||
request.setOptions(options);
|
||||
}
|
||||
for (Header defaultHeader : defaultHeaders) {
|
||||
// request level headers override default headers
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
@ -251,6 +252,37 @@ public class RestClientTests extends RestClientTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testSetHostsPreservesOrdering() throws Exception {
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
HttpHost[] hosts = randomHosts();
|
||||
restClient.setHosts(hosts);
|
||||
assertEquals(Arrays.asList(hosts), restClient.getHosts());
|
||||
}
|
||||
}
|
||||
|
||||
private static HttpHost[] randomHosts() {
|
||||
int numHosts = randomIntBetween(1, 10);
|
||||
HttpHost[] hosts = new HttpHost[numHosts];
|
||||
for (int i = 0; i < hosts.length; i++) {
|
||||
hosts[i] = new HttpHost("host-" + i, 9200);
|
||||
}
|
||||
return hosts;
|
||||
}
|
||||
|
||||
public void testSetHostsDuplicatedHosts() throws Exception {
|
||||
try (RestClient restClient = createRestClient()) {
|
||||
int numHosts = randomIntBetween(1, 10);
|
||||
HttpHost[] hosts = new HttpHost[numHosts];
|
||||
HttpHost host = new HttpHost("host", 9200);
|
||||
for (int i = 0; i < hosts.length; i++) {
|
||||
hosts[i] = host;
|
||||
}
|
||||
restClient.setHosts(hosts);
|
||||
assertEquals(1, restClient.getHosts().size());
|
||||
assertEquals(host, restClient.getHosts().get(0));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}.
|
||||
*/
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.http.ssl.SSLContexts;
|
|||
import org.apache.http.util.EntityUtils;
|
||||
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.ResponseListener;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
|
@ -171,14 +172,22 @@ public class RestClientDocumentation {
|
|||
//tag::rest-client-body-shorter
|
||||
request.setJsonEntity("{\"json\":\"text\"}");
|
||||
//end::rest-client-body-shorter
|
||||
//tag::rest-client-headers
|
||||
request.addHeader("Accept", "text/plain");
|
||||
request.addHeader("Cache-Control", "no-cache");
|
||||
//end::rest-client-headers
|
||||
//tag::rest-client-response-consumer
|
||||
request.setHttpAsyncResponseConsumerFactory(
|
||||
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
|
||||
//end::rest-client-response-consumer
|
||||
{
|
||||
//tag::rest-client-headers
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("Accept", "text/plain");
|
||||
options.addHeader("Cache-Control", "no-cache");
|
||||
request.setOptions(options);
|
||||
//end::rest-client-headers
|
||||
}
|
||||
{
|
||||
//tag::rest-client-response-consumer
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.setHttpAsyncResponseConsumerFactory(
|
||||
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
|
||||
request.setOptions(options);
|
||||
//end::rest-client-response-consumer
|
||||
}
|
||||
}
|
||||
{
|
||||
HttpEntity[] documents = new HttpEntity[10];
|
||||
|
|
|
@ -58,7 +58,6 @@ public class SniffOnFailureListener extends RestClient.FailureListener {
|
|||
if (sniffer == null) {
|
||||
throw new IllegalStateException("sniffer was not set, unable to sniff on failure");
|
||||
}
|
||||
//re-sniff immediately but take out the node that failed
|
||||
sniffer.sniffOnFailure(host);
|
||||
sniffer.sniffOnFailure();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -31,12 +31,14 @@ import java.security.AccessController;
|
|||
import java.security.PrivilegedAction;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.ScheduledThreadPoolExecutor;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
/**
|
||||
* Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of
|
||||
|
@ -51,101 +53,175 @@ public class Sniffer implements Closeable {
|
|||
private static final Log logger = LogFactory.getLog(Sniffer.class);
|
||||
private static final String SNIFFER_THREAD_NAME = "es_rest_client_sniffer";
|
||||
|
||||
private final Task task;
|
||||
private final HostsSniffer hostsSniffer;
|
||||
private final RestClient restClient;
|
||||
private final long sniffIntervalMillis;
|
||||
private final long sniffAfterFailureDelayMillis;
|
||||
private final Scheduler scheduler;
|
||||
private final AtomicBoolean initialized = new AtomicBoolean(false);
|
||||
private volatile ScheduledTask nextScheduledTask;
|
||||
|
||||
Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) {
|
||||
this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay);
|
||||
this(restClient, hostsSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay);
|
||||
}
|
||||
|
||||
Sniffer(RestClient restClient, HostsSniffer hostsSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) {
|
||||
this.hostsSniffer = hostsSniffer;
|
||||
this.restClient = restClient;
|
||||
this.sniffIntervalMillis = sniffInterval;
|
||||
this.sniffAfterFailureDelayMillis = sniffAfterFailureDelay;
|
||||
this.scheduler = scheduler;
|
||||
/*
|
||||
* The first sniffing round is async, so this constructor returns before nextScheduledTask is assigned to a task.
|
||||
* The initialized flag is a protection against NPE due to that.
|
||||
*/
|
||||
Task task = new Task(sniffIntervalMillis) {
|
||||
@Override
|
||||
public void run() {
|
||||
super.run();
|
||||
initialized.compareAndSet(false, true);
|
||||
}
|
||||
};
|
||||
/*
|
||||
* We do not keep track of the returned future as we never intend to cancel the initial sniffing round, we rather
|
||||
* prevent any other operation from being executed till the sniffer is properly initialized
|
||||
*/
|
||||
scheduler.schedule(task, 0L);
|
||||
}
|
||||
|
||||
/**
|
||||
* Triggers a new sniffing round and explicitly takes out the failed host provided as argument
|
||||
* Schedule sniffing to run as soon as possible if it isn't already running. Once such sniffing round runs
|
||||
* it will also schedule a new round after sniffAfterFailureDelay ms.
|
||||
*/
|
||||
public void sniffOnFailure(HttpHost failedHost) {
|
||||
this.task.sniffOnFailure(failedHost);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
task.shutdown();
|
||||
}
|
||||
|
||||
private static class Task implements Runnable {
|
||||
private final HostsSniffer hostsSniffer;
|
||||
private final RestClient restClient;
|
||||
|
||||
private final long sniffIntervalMillis;
|
||||
private final long sniffAfterFailureDelayMillis;
|
||||
private final ScheduledExecutorService scheduledExecutorService;
|
||||
private final AtomicBoolean running = new AtomicBoolean(false);
|
||||
private ScheduledFuture<?> scheduledFuture;
|
||||
|
||||
private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) {
|
||||
this.hostsSniffer = hostsSniffer;
|
||||
this.restClient = restClient;
|
||||
this.sniffIntervalMillis = sniffIntervalMillis;
|
||||
this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
|
||||
SnifferThreadFactory threadFactory = new SnifferThreadFactory(SNIFFER_THREAD_NAME);
|
||||
this.scheduledExecutorService = Executors.newScheduledThreadPool(1, threadFactory);
|
||||
scheduleNextRun(0);
|
||||
}
|
||||
|
||||
synchronized void scheduleNextRun(long delayMillis) {
|
||||
if (scheduledExecutorService.isShutdown() == false) {
|
||||
try {
|
||||
if (scheduledFuture != null) {
|
||||
//regardless of when the next sniff is scheduled, cancel it and schedule a new one with updated delay
|
||||
this.scheduledFuture.cancel(false);
|
||||
}
|
||||
logger.debug("scheduling next sniff in " + delayMillis + " ms");
|
||||
this.scheduledFuture = this.scheduledExecutorService.schedule(this, delayMillis, TimeUnit.MILLISECONDS);
|
||||
} catch(Exception e) {
|
||||
logger.error("error while scheduling next sniffer task", e);
|
||||
}
|
||||
public void sniffOnFailure() {
|
||||
//sniffOnFailure does nothing until the initial sniffing round has been completed
|
||||
if (initialized.get()) {
|
||||
/*
|
||||
* If sniffing is already running, there is no point in scheduling another round right after the current one.
|
||||
* Concurrent calls may be checking the same task state, but only the first skip call on the same task returns true.
|
||||
* The task may also get replaced while we check its state, in which case calling skip on it returns false.
|
||||
*/
|
||||
if (this.nextScheduledTask.skip()) {
|
||||
/*
|
||||
* We do not keep track of this future as the task will immediately run and we don't intend to cancel it
|
||||
* due to concurrent sniffOnFailure runs. Effectively the previous (now cancelled or skipped) task will stay
|
||||
* assigned to nextTask till this onFailure round gets run and schedules its corresponding afterFailure round.
|
||||
*/
|
||||
scheduler.schedule(new Task(sniffAfterFailureDelayMillis), 0L);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enum TaskState {
|
||||
WAITING, SKIPPED, STARTED
|
||||
}
|
||||
|
||||
class Task implements Runnable {
|
||||
final long nextTaskDelay;
|
||||
final AtomicReference<TaskState> taskState = new AtomicReference<>(TaskState.WAITING);
|
||||
|
||||
Task(long nextTaskDelay) {
|
||||
this.nextTaskDelay = nextTaskDelay;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
sniff(null, sniffIntervalMillis);
|
||||
}
|
||||
|
||||
void sniffOnFailure(HttpHost failedHost) {
|
||||
sniff(failedHost, sniffAfterFailureDelayMillis);
|
||||
}
|
||||
|
||||
void sniff(HttpHost excludeHost, long nextSniffDelayMillis) {
|
||||
if (running.compareAndSet(false, true)) {
|
||||
try {
|
||||
List<HttpHost> sniffedHosts = hostsSniffer.sniffHosts();
|
||||
logger.debug("sniffed hosts: " + sniffedHosts);
|
||||
if (excludeHost != null) {
|
||||
sniffedHosts.remove(excludeHost);
|
||||
}
|
||||
if (sniffedHosts.isEmpty()) {
|
||||
logger.warn("no hosts to set, hosts will be updated at the next sniffing round");
|
||||
} else {
|
||||
this.restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()]));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("error while sniffing nodes", e);
|
||||
} finally {
|
||||
scheduleNextRun(nextSniffDelayMillis);
|
||||
running.set(false);
|
||||
}
|
||||
/*
|
||||
* Skipped or already started tasks do nothing. In most cases tasks will be cancelled and not run, but we want to protect for
|
||||
* cases where future#cancel returns true yet the task runs. We want to make sure that such tasks do nothing otherwise they will
|
||||
* schedule another round at the end and so on, leaving us with multiple parallel sniffing "tracks" whish is undesirable.
|
||||
*/
|
||||
if (taskState.compareAndSet(TaskState.WAITING, TaskState.STARTED) == false) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
synchronized void shutdown() {
|
||||
scheduledExecutorService.shutdown();
|
||||
try {
|
||||
if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
|
||||
return;
|
||||
}
|
||||
scheduledExecutorService.shutdownNow();
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
sniff();
|
||||
} catch (Exception e) {
|
||||
logger.error("error while sniffing nodes", e);
|
||||
} finally {
|
||||
Task task = new Task(sniffIntervalMillis);
|
||||
Future<?> future = scheduler.schedule(task, nextTaskDelay);
|
||||
//tasks are run by a single threaded executor, so swapping is safe with a simple volatile variable
|
||||
ScheduledTask previousTask = nextScheduledTask;
|
||||
nextScheduledTask = new ScheduledTask(task, future);
|
||||
assert initialized.get() == false ||
|
||||
previousTask.task.isSkipped() || previousTask.task.hasStarted() : "task that we are replacing is neither " +
|
||||
"cancelled nor has it ever started";
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the task has started, false in case it didn't start (yet?) or it was skipped
|
||||
*/
|
||||
boolean hasStarted() {
|
||||
return taskState.get() == TaskState.STARTED;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets this task to be skipped. Returns true if the task will be skipped, false if the task has already started.
|
||||
*/
|
||||
boolean skip() {
|
||||
/*
|
||||
* Threads may still get run although future#cancel returns true. We make sure that a task is either cancelled (or skipped),
|
||||
* or entirely run. In the odd case that future#cancel returns true and the thread still runs, the task won't do anything.
|
||||
* In case future#cancel returns true but the task has already started, this state change will not succeed hence this method
|
||||
* returns false and the task will normally run.
|
||||
*/
|
||||
return taskState.compareAndSet(TaskState.WAITING, TaskState.SKIPPED);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the task was set to be skipped before it was started
|
||||
*/
|
||||
boolean isSkipped() {
|
||||
return taskState.get() == TaskState.SKIPPED;
|
||||
}
|
||||
}
|
||||
|
||||
static final class ScheduledTask {
|
||||
final Task task;
|
||||
final Future<?> future;
|
||||
|
||||
ScheduledTask(Task task, Future<?> future) {
|
||||
this.task = task;
|
||||
this.future = future;
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancels this task. Returns true if the task has been successfully cancelled, meaning it won't be executed
|
||||
* or if it is its execution won't have any effect. Returns false if the task cannot be cancelled (possibly it was
|
||||
* already cancelled or already completed).
|
||||
*/
|
||||
boolean skip() {
|
||||
/*
|
||||
* Future#cancel should return false whenever a task cannot be cancelled, most likely as it has already started. We don't
|
||||
* trust it much though so we try to cancel hoping that it will work. At the same time we always call skip too, which means
|
||||
* that if the task has already started the state change will fail. We could potentially not call skip when cancel returns
|
||||
* false but we prefer to stay on the safe side.
|
||||
*/
|
||||
future.cancel(false);
|
||||
return task.skip();
|
||||
}
|
||||
}
|
||||
|
||||
final void sniff() throws IOException {
|
||||
List<HttpHost> sniffedHosts = hostsSniffer.sniffHosts();
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("sniffed hosts: " + sniffedHosts);
|
||||
}
|
||||
if (sniffedHosts.isEmpty()) {
|
||||
logger.warn("no hosts to set, hosts will be updated at the next sniffing round");
|
||||
} else {
|
||||
restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()]));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
if (initialized.get()) {
|
||||
nextScheduledTask.skip();
|
||||
}
|
||||
this.scheduler.shutdown();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -158,8 +234,62 @@ public class Sniffer implements Closeable {
|
|||
return new SnifferBuilder(restClient);
|
||||
}
|
||||
|
||||
private static class SnifferThreadFactory implements ThreadFactory {
|
||||
/**
|
||||
* The Scheduler interface allows to isolate the sniffing scheduling aspects so that we can test
|
||||
* the sniffer by injecting when needed a custom scheduler that is more suited for testing.
|
||||
*/
|
||||
interface Scheduler {
|
||||
/**
|
||||
* Schedules the provided {@link Runnable} to be executed in <code>delayMillis</code> milliseconds
|
||||
*/
|
||||
Future<?> schedule(Task task, long delayMillis);
|
||||
|
||||
/**
|
||||
* Shuts this scheduler down
|
||||
*/
|
||||
void shutdown();
|
||||
}
|
||||
|
||||
/**
|
||||
* Default implementation of {@link Scheduler}, based on {@link ScheduledExecutorService}
|
||||
*/
|
||||
static final class DefaultScheduler implements Scheduler {
|
||||
final ScheduledExecutorService executor;
|
||||
|
||||
DefaultScheduler() {
|
||||
this(initScheduledExecutorService());
|
||||
}
|
||||
|
||||
DefaultScheduler(ScheduledExecutorService executor) {
|
||||
this.executor = executor;
|
||||
}
|
||||
|
||||
private static ScheduledExecutorService initScheduledExecutorService() {
|
||||
ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1, new SnifferThreadFactory(SNIFFER_THREAD_NAME));
|
||||
executor.setRemoveOnCancelPolicy(true);
|
||||
return executor;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Future<?> schedule(Task task, long delayMillis) {
|
||||
return executor.schedule(task, delayMillis, TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
executor.shutdown();
|
||||
try {
|
||||
if (executor.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
|
||||
return;
|
||||
}
|
||||
executor.shutdownNow();
|
||||
} catch (InterruptedException ignore) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static class SnifferThreadFactory implements ThreadFactory {
|
||||
private final AtomicInteger threadNumber = new AtomicInteger(1);
|
||||
private final String namePrefix;
|
||||
private final ThreadFactory originalThreadFactory;
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.client.sniff;
|
|||
|
||||
import org.apache.http.HttpHost;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
|
@ -30,7 +29,7 @@ import java.util.List;
|
|||
*/
|
||||
class MockHostsSniffer implements HostsSniffer {
|
||||
@Override
|
||||
public List<HttpHost> sniffHosts() throws IOException {
|
||||
public List<HttpHost> sniffHosts() {
|
||||
return Collections.singletonList(new HttpHost("localhost", 9200));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,656 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.sniff;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.RestClientTestCase;
|
||||
import org.elasticsearch.client.sniff.Sniffer.DefaultScheduler;
|
||||
import org.elasticsearch.client.sniff.Sniffer.Scheduler;
|
||||
import org.mockito.Matchers;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CancellationException;
|
||||
import java.util.concurrent.CopyOnWriteArraySet;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.ScheduledExecutorService;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.ScheduledThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.CoreMatchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertNotEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertSame;
|
||||
import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
import static org.mockito.Matchers.any;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.times;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.verifyNoMoreInteractions;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class SnifferTests extends RestClientTestCase {
|
||||
|
||||
/**
|
||||
* Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link HostsSniffer} implementation
|
||||
* to retrieve nodes and set them (when not empty) to the provided {@link RestClient} instance.
|
||||
*/
|
||||
public void testSniff() throws IOException {
|
||||
HttpHost initialHost = new HttpHost("localhost", 9200);
|
||||
try (RestClient restClient = RestClient.builder(initialHost).build()) {
|
||||
Scheduler noOpScheduler = new Scheduler() {
|
||||
@Override
|
||||
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
|
||||
return mock(Future.class);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
|
||||
}
|
||||
};
|
||||
CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
|
||||
int iters = randomIntBetween(5, 30);
|
||||
try (Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 1000L, -1)){
|
||||
{
|
||||
assertEquals(1, restClient.getHosts().size());
|
||||
HttpHost httpHost = restClient.getHosts().get(0);
|
||||
assertEquals("localhost", httpHost.getHostName());
|
||||
assertEquals(9200, httpHost.getPort());
|
||||
}
|
||||
int emptyList = 0;
|
||||
int failures = 0;
|
||||
int runs = 0;
|
||||
List<HttpHost> lastHosts = Collections.singletonList(initialHost);
|
||||
for (int i = 0; i < iters; i++) {
|
||||
try {
|
||||
runs++;
|
||||
sniffer.sniff();
|
||||
if (hostsSniffer.failures.get() > failures) {
|
||||
failures++;
|
||||
fail("should have failed given that hostsSniffer says it threw an exception");
|
||||
} else if (hostsSniffer.emptyList.get() > emptyList) {
|
||||
emptyList++;
|
||||
assertEquals(lastHosts, restClient.getHosts());
|
||||
} else {
|
||||
assertNotEquals(lastHosts, restClient.getHosts());
|
||||
List<HttpHost> expectedHosts = CountingHostsSniffer.buildHosts(runs);
|
||||
assertEquals(expectedHosts, restClient.getHosts());
|
||||
lastHosts = restClient.getHosts();
|
||||
}
|
||||
} catch(IOException e) {
|
||||
if (hostsSniffer.failures.get() > failures) {
|
||||
failures++;
|
||||
assertEquals("communication breakdown", e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
assertEquals(hostsSniffer.emptyList.get(), emptyList);
|
||||
assertEquals(hostsSniffer.failures.get(), failures);
|
||||
assertEquals(hostsSniffer.runs.get(), runs);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}.
|
||||
* Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled.
|
||||
* The {@link CountingHostsSniffer} doesn't make any network connection but may throw exception or return no hosts, which makes
|
||||
* it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling.
|
||||
* The {@link Scheduler} implementation submits rather than scheduling tasks, meaning that it doesn't respect the requested sniff
|
||||
* delays while allowing to assert that the requested delays for each requested run and the following one are the expected values.
|
||||
*/
|
||||
public void testOrdinarySniffRounds() throws Exception {
|
||||
final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
|
||||
long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
|
||||
RestClient restClient = mock(RestClient.class);
|
||||
CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
|
||||
final int iters = randomIntBetween(30, 100);
|
||||
final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
|
||||
final CountDownLatch completionLatch = new CountDownLatch(1);
|
||||
final AtomicInteger runs = new AtomicInteger(iters);
|
||||
final ExecutorService executor = Executors.newSingleThreadExecutor();
|
||||
final AtomicReference<Future<?>> lastFuture = new AtomicReference<>();
|
||||
final AtomicReference<Sniffer.Task> lastTask = new AtomicReference<>();
|
||||
Scheduler scheduler = new Scheduler() {
|
||||
@Override
|
||||
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
|
||||
assertEquals(sniffInterval, task.nextTaskDelay);
|
||||
int numberOfRuns = runs.getAndDecrement();
|
||||
if (numberOfRuns == iters) {
|
||||
//the first call is to schedule the first sniff round from the Sniffer constructor, with delay O
|
||||
assertEquals(0L, delayMillis);
|
||||
assertEquals(sniffInterval, task.nextTaskDelay);
|
||||
} else {
|
||||
//all of the subsequent times "schedule" is called with delay set to the configured sniff interval
|
||||
assertEquals(sniffInterval, delayMillis);
|
||||
assertEquals(sniffInterval, task.nextTaskDelay);
|
||||
if (numberOfRuns == 0) {
|
||||
completionLatch.countDown();
|
||||
return null;
|
||||
}
|
||||
}
|
||||
//we submit rather than scheduling to make the test quick and not depend on time
|
||||
Future<?> future = executor.submit(task);
|
||||
futures.add(future);
|
||||
if (numberOfRuns == 1) {
|
||||
lastFuture.set(future);
|
||||
lastTask.set(task);
|
||||
}
|
||||
return future;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
//the executor is closed externally, shutdown is tested separately
|
||||
}
|
||||
};
|
||||
try {
|
||||
new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS));
|
||||
assertEquals(iters, futures.size());
|
||||
//the last future is the only one that may not be completed yet, as the count down happens
|
||||
//while scheduling the next round which is still part of the execution of the runnable itself.
|
||||
assertTrue(lastTask.get().hasStarted());
|
||||
lastFuture.get().get();
|
||||
for (Future<?> future : futures) {
|
||||
assertTrue(future.isDone());
|
||||
future.get();
|
||||
}
|
||||
} finally {
|
||||
executor.shutdown();
|
||||
assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
|
||||
}
|
||||
int totalRuns = hostsSniffer.runs.get();
|
||||
assertEquals(iters, totalRuns);
|
||||
int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
|
||||
verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
|
||||
verifyNoMoreInteractions(restClient);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test that {@link Sniffer#close()} shuts down the underlying {@link Scheduler}, and that such calls are idempotent.
|
||||
* Also verifies that the next scheduled round gets cancelled.
|
||||
*/
|
||||
public void testClose() {
|
||||
final Future<?> future = mock(Future.class);
|
||||
long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
|
||||
long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
|
||||
RestClient restClient = mock(RestClient.class);
|
||||
final AtomicInteger shutdown = new AtomicInteger(0);
|
||||
final AtomicBoolean initialized = new AtomicBoolean(false);
|
||||
Scheduler scheduler = new Scheduler() {
|
||||
@Override
|
||||
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
|
||||
if (initialized.compareAndSet(false, true)) {
|
||||
//run from the same thread so the sniffer gets for sure initialized and the scheduled task gets cancelled on close
|
||||
task.run();
|
||||
}
|
||||
return future;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
shutdown.incrementAndGet();
|
||||
}
|
||||
};
|
||||
|
||||
Sniffer sniffer = new Sniffer(restClient, new MockHostsSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay);
|
||||
assertEquals(0, shutdown.get());
|
||||
int iters = randomIntBetween(3, 10);
|
||||
for (int i = 1; i <= iters; i++) {
|
||||
sniffer.close();
|
||||
verify(future, times(i)).cancel(false);
|
||||
assertEquals(i, shutdown.get());
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Verifies that {@link Sniffer#sniffOnFailure()} is a no-op until the sniffer has been initialized:
 * the scheduler below never runs the scheduled task, so no sniffing round ever executes and the
 * only schedule call observed is the initial one made by the Sniffer constructor.
 */
public void testSniffOnFailureNotInitialized() {
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    final AtomicInteger scheduleCalls = new AtomicInteger(0);
    Scheduler scheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            // never runs the task: the sniffer stays uninitialized for the whole test
            scheduleCalls.incrementAndGet();
            return null;
        }

        @Override
        public void shutdown() {
        }
    };

    Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
    for (int i = 0; i < 10; i++) {
        sniffer.sniffOnFailure();
    }
    // only the initial schedule call from the constructor; sniffOnFailure scheduled nothing
    assertEquals(1, scheduleCalls.get());
    int totalRuns = hostsSniffer.runs.get();
    assertEquals(0, totalRuns);
    // with zero runs this is zero as well; spelled out to mirror the other tests' bookkeeping
    int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
    verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
    verifyNoMoreInteractions(restClient);
}
|
||||
|
||||
/**
 * Test behaviour when a bunch of onFailure sniffing rounds are triggered in parallel. Each run will always
 * schedule a subsequent afterFailure round. Also, for each onFailure round that starts, the next scheduled round
 * (either afterFailure or ordinary) gets cancelled.
 */
public void testSniffOnFailure() throws Exception {
    RestClient restClient = mock(RestClient.class);
    CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
    final AtomicBoolean initializing = new AtomicBoolean(true);
    final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
    final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
    int minNumOnFailureRounds = randomIntBetween(5, 10);
    final CountDownLatch initializingLatch = new CountDownLatch(1);
    // tracks every scheduled round by category so the assertions below can reason about each kind
    final Set<Sniffer.ScheduledTask> ordinaryRoundsTasks = new CopyOnWriteArraySet<>();
    final AtomicReference<Future<?>> initializingFuture = new AtomicReference<>();
    final Set<Sniffer.ScheduledTask> onFailureTasks = new CopyOnWriteArraySet<>();
    final Set<Sniffer.ScheduledTask> afterFailureTasks = new CopyOnWriteArraySet<>();
    final AtomicBoolean onFailureCompleted = new AtomicBoolean(false);
    final CountDownLatch completionLatch = new CountDownLatch(1);
    final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        Scheduler scheduler = new Scheduler() {
            @Override
            public Future<?> schedule(final Sniffer.Task task, long delayMillis) {
                // first call: the initialization round triggered by the Sniffer constructor (delay 0)
                if (initializing.compareAndSet(true, false)) {
                    assertEquals(0L, delayMillis);
                    Future<?> future = executor.submit(new Runnable() {
                        @Override
                        public void run() {
                            try {
                                task.run();
                            } finally {
                                //we need to make sure that the sniffer is initialized, so the sniffOnFailure
                                //call does what it needs to do. Otherwise nothing happens until initialized.
                                initializingLatch.countDown();
                            }
                        }
                    });
                    assertTrue(initializingFuture.compareAndSet(null, future));
                    return future;
                }
                // delay 0 after initialization: an onFailure round
                if (delayMillis == 0L) {
                    Future<?> future = executor.submit(task);
                    onFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }
                // the round scheduled right after a failure round
                if (delayMillis == sniffAfterFailureDelay) {
                    Future<?> future = scheduleOrSubmit(task);
                    afterFailureTasks.add(new Sniffer.ScheduledTask(task, future));
                    return future;
                }

                // anything else must be an ordinary round at the regular sniff interval
                assertEquals(sniffInterval, delayMillis);
                assertEquals(sniffInterval, task.nextTaskDelay);

                // once the onFailure storm is over and every failure round produced its afterFailure
                // round, stop scheduling for real and signal completion
                if (onFailureCompleted.get() && onFailureTasks.size() == afterFailureTasks.size()) {
                    completionLatch.countDown();
                    return mock(Future.class);
                }

                Future<?> future = scheduleOrSubmit(task);
                ordinaryRoundsTasks.add(new Sniffer.ScheduledTask(task, future));
                return future;
            }

            // randomly either delays the task a little or runs it as soon as possible,
            // to exercise both cancellation-before-start and cancellation-while-running
            private Future<?> scheduleOrSubmit(Sniffer.Task task) {
                if (randomBoolean()) {
                    return executor.schedule(task, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
                } else {
                    return executor.submit(task);
                }
            }

            @Override
            public void shutdown() {
            }
        };
        final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
        assertTrue("timeout waiting for sniffer to get initialized", initializingLatch.await(1000, TimeUnit.MILLISECONDS));

        ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20));
        Set<Future<?>> onFailureFutures = new CopyOnWriteArraySet<>();
        try {
            //with tasks executing quickly one after each other, it is very likely that the onFailure round gets skipped
            //as another round is already running. We retry till enough runs get through as that's what we want to test.
            while (onFailureTasks.size() < minNumOnFailureRounds) {
                onFailureFutures.add(onFailureExecutor.submit(new Runnable() {
                    @Override
                    public void run() {
                        sniffer.sniffOnFailure();
                    }
                }));
            }
            assertThat(onFailureFutures.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
            for (Future<?> onFailureFuture : onFailureFutures) {
                assertNull(onFailureFuture.get());
            }
            onFailureCompleted.set(true);
        } finally {
            onFailureExecutor.shutdown();
            onFailureExecutor.awaitTermination(1000, TimeUnit.MILLISECONDS);
        }

        // the initialization round must have completed normally, never been cancelled
        assertFalse(initializingFuture.get().isCancelled());
        assertTrue(initializingFuture.get().isDone());
        assertNull(initializingFuture.get().get());

        assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS));
        assertThat(onFailureTasks.size(), greaterThanOrEqualTo(minNumOnFailureRounds));
        // every onFailure round that ran must have scheduled exactly one afterFailure round
        assertEquals(onFailureTasks.size(), afterFailureTasks.size());

        // onFailure rounds always run to completion and are never skipped
        for (Sniffer.ScheduledTask onFailureTask : onFailureTasks) {
            assertFalse(onFailureTask.future.isCancelled());
            assertTrue(onFailureTask.future.isDone());
            assertNull(onFailureTask.future.get());
            assertTrue(onFailureTask.task.hasStarted());
            assertFalse(onFailureTask.task.isSkipped());
        }

        int cancelledTasks = 0;
        // +1 accounts for the initialization round, which also ran a sniff
        int completedTasks = onFailureTasks.size() + 1;
        for (Sniffer.ScheduledTask afterFailureTask : afterFailureTasks) {
            if (assertTaskCancelledOrCompleted(afterFailureTask)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }

        assertThat(ordinaryRoundsTasks.size(), greaterThan(0));
        for (Sniffer.ScheduledTask task : ordinaryRoundsTasks) {
            if (assertTaskCancelledOrCompleted(task)) {
                completedTasks++;
            } else {
                cancelledTasks++;
            }
        }
        // each onFailure round cancels exactly one previously scheduled round
        assertEquals(onFailureTasks.size(), cancelledTasks);

        // every completed task corresponds to exactly one sniffHosts invocation
        assertEquals(completedTasks, hostsSniffer.runs.get());
        int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
        verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
        verifyNoMoreInteractions(restClient);
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000L, TimeUnit.MILLISECONDS);
    }
}
|
||||
|
||||
/**
 * Asserts that the given scheduled task was either skipped/cancelled or ran to completion.
 *
 * @return {@code true} when the task completed, {@code false} when it was skipped (and hence cancelled)
 */
private static boolean assertTaskCancelledOrCompleted(Sniffer.ScheduledTask task) throws ExecutionException, InterruptedException {
    if (task.task.isSkipped()) {
        // a skipped task must have had its future cancelled, and get() must reflect that
        assertTrue(task.future.isCancelled());
        try {
            task.future.get();
            fail("cancellation exception should have been thrown");
        } catch(CancellationException ignore) {
        }
        return false;
    } else {
        try {
            assertNull(task.future.get());
        } catch(CancellationException ignore) {
            // the future may have been cancelled after the task already started;
            // the task still ran to completion in that case
            assertTrue(task.future.isCancelled());
        }
        assertTrue(task.future.isDone());
        assertTrue(task.task.hasStarted());
        return true;
    }
}
|
||||
|
||||
/**
 * Verifies the contract of {@link Sniffer.ScheduledTask#skip()}: when it returns {@code true}
 * the task never runs (or runs but does nothing), and when it returns {@code false} the task
 * is guaranteed to run to completion. Subsequent skip calls always return {@code false}.
 */
public void testTaskCancelling() throws Exception {
    RestClient restClient = mock(RestClient.class);
    HostsSniffer hostsSniffer = mock(HostsSniffer.class);
    Scheduler noOpScheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            return null;
        }

        @Override
        public void shutdown() {
        }
    };
    Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        int numIters = randomIntBetween(50, 100);
        for (int i = 0; i < numIters; i++) {
            Sniffer.Task task = sniffer.new Task(0L);
            // wrap the task so we can wait for its completion even when the future was cancelled mid-run
            TaskWrapper wrapper = new TaskWrapper(task);
            Future<?> future;
            if (rarely()) {
                future = executor.schedule(wrapper, randomLongBetween(0L, 200L), TimeUnit.MILLISECONDS);
            } else {
                future = executor.submit(wrapper);
            }
            Sniffer.ScheduledTask scheduledTask = new Sniffer.ScheduledTask(task, future);
            boolean skip = scheduledTask.skip();
            try {
                assertNull(future.get());
            } catch(CancellationException ignore) {
                assertTrue(future.isCancelled());
            }

            if (skip) {
                //the task was either cancelled before starting, in which case it will never start (thanks to Future#cancel),
                //or skipped, in which case it will run but do nothing (thanks to Task#skip).
                //Here we want to make sure that whenever skip returns true, the task either won't run or it won't do anything,
                //otherwise we may end up with parallel sniffing tracks given that each task schedules the following one. We need to
                // make sure that onFailure takes scheduling over while at the same time ordinary rounds don't go on.
                assertFalse(task.hasStarted());
                assertTrue(task.isSkipped());
                assertTrue(future.isCancelled());
                assertTrue(future.isDone());
            } else {
                //if a future is cancelled when its execution has already started, future#get throws CancellationException before
                //completion. The execution continues though so we use a latch to try and wait for the task to be completed.
                //Here we want to make sure that whenever skip returns false, the task will be completed, otherwise we may be
                //missing to schedule the following round, which means no sniffing will ever happen again besides on failure sniffing.
                assertTrue(wrapper.await());
                //the future may or may not be cancelled but the task has for sure started and completed
                assertTrue(task.toString(), task.hasStarted());
                assertFalse(task.isSkipped());
                assertTrue(future.isDone());
            }
            //subsequent cancel calls return false for sure
            int cancelCalls = randomIntBetween(1, 10);
            for (int j = 0; j < cancelCalls; j++) {
                assertFalse(scheduledTask.skip());
            }
        }
    } finally {
        executor.shutdown();
        executor.awaitTermination(1000, TimeUnit.MILLISECONDS);
    }
}
|
||||
|
||||
/**
 * Wraps a {@link Sniffer.Task} and allows to wait for its completion. This is needed to verify
 * that tasks are either never started or always completed. Calling {@link Future#get()} against a cancelled future will
 * throw {@link CancellationException} straight-away but the execution of the task will continue if it had already started,
 * in which case {@link Future#cancel(boolean)} returns true which is not very helpful.
 */
private static final class TaskWrapper implements Runnable {
    // the wrapped task; delegated to from run()
    final Sniffer.Task task;
    // released once run() finishes, whether the task succeeded or threw
    final CountDownLatch completionLatch = new CountDownLatch(1);

    TaskWrapper(Sniffer.Task task) {
        this.task = task;
    }

    @Override
    public void run() {
        try {
            task.run();
        } finally {
            // always count down, even on failure, so await() can never hang on an exception
            completionLatch.countDown();
        }
    }

    /**
     * Waits up to one second for the wrapped task to complete.
     *
     * @return {@code true} if the task completed within the timeout
     */
    boolean await() throws InterruptedException {
        return completionLatch.await(1000, TimeUnit.MILLISECONDS);
    }
}
|
||||
|
||||
/**
 * Mock {@link HostsSniffer} implementation used for testing, which most of the time returns a fixed host.
 * It rarely throws exception or return an empty list of hosts, to make sure that such situations are properly handled.
 * It also asserts that it never gets called concurrently, based on the assumption that only one sniff run can be run
 * at a given point in time.
 */
private static class CountingHostsSniffer implements HostsSniffer {
    // total number of sniffHosts invocations, including the failing and empty ones
    private final AtomicInteger runs = new AtomicInteger(0);
    // number of invocations that threw IOException
    private final AtomicInteger failures = new AtomicInteger(0);
    // number of invocations that returned an empty host list
    private final AtomicInteger emptyList = new AtomicInteger(0);

    @Override
    public List<HttpHost> sniffHosts() throws IOException {
        int run = runs.incrementAndGet();
        if (rarely()) {
            failures.incrementAndGet();
            //check that if communication breaks, sniffer keeps on working
            throw new IOException("communication breakdown");
        }
        if (rarely()) {
            emptyList.incrementAndGet();
            return Collections.emptyList();
        }
        return buildHosts(run);
    }

    // builds between 1 and 5 hosts whose names encode the run number, so results differ per run
    private static List<HttpHost> buildHosts(int run) {
        int size = run % 5 + 1;
        assert size > 0;
        List<HttpHost> hosts = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            hosts.add(new HttpHost("sniffed-" + run, 9200 + i));
        }
        return hosts;
    }
}
|
||||
|
||||
/**
 * Verifies that {@link DefaultScheduler#schedule} delegates to the underlying
 * {@link ScheduledExecutorService} with the given task and delay (in milliseconds)
 * and returns the future produced by the executor unchanged.
 */
@SuppressWarnings("unchecked")
public void testDefaultSchedulerSchedule() {
    RestClient restClient = mock(RestClient.class);
    HostsSniffer hostsSniffer = mock(HostsSniffer.class);
    Scheduler noOpScheduler = new Scheduler() {
        @Override
        public Future<?> schedule(Sniffer.Task task, long delayMillis) {
            return mock(Future.class);
        }

        @Override
        public void shutdown() {

        }
    };
    // a real Sniffer is only needed here to construct a Task instance
    Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
    Sniffer.Task task = sniffer.new Task(randomLongBetween(1, Long.MAX_VALUE));

    ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);
    final ScheduledFuture<?> mockedFuture = mock(ScheduledFuture.class);
    when(scheduledExecutorService.schedule(any(Runnable.class), any(Long.class), any(TimeUnit.class)))
            .then(new Answer<ScheduledFuture<?>>() {
                @Override
                public ScheduledFuture<?> answer(InvocationOnMock invocationOnMock) {
                    return mockedFuture;
                }
            });
    DefaultScheduler scheduler = new DefaultScheduler(scheduledExecutorService);
    long delay = randomLongBetween(1, Long.MAX_VALUE);
    Future<?> future = scheduler.schedule(task, delay);
    // the executor's future must be handed back as-is
    assertSame(mockedFuture, future);
    verify(scheduledExecutorService).schedule(task, delay, TimeUnit.MILLISECONDS);
    verifyNoMoreInteractions(scheduledExecutorService, mockedFuture);
}
|
||||
|
||||
public void testDefaultSchedulerThreadFactory() {
|
||||
DefaultScheduler defaultScheduler = new DefaultScheduler();
|
||||
try {
|
||||
ScheduledExecutorService executorService = defaultScheduler.executor;
|
||||
assertThat(executorService, instanceOf(ScheduledThreadPoolExecutor.class));
|
||||
assertThat(executorService, instanceOf(ScheduledThreadPoolExecutor.class));
|
||||
ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor) executorService;
|
||||
assertTrue(executor.getRemoveOnCancelPolicy());
|
||||
assertFalse(executor.getContinueExistingPeriodicTasksAfterShutdownPolicy());
|
||||
assertTrue(executor.getExecuteExistingDelayedTasksAfterShutdownPolicy());
|
||||
assertThat(executor.getThreadFactory(), instanceOf(Sniffer.SnifferThreadFactory.class));
|
||||
int iters = randomIntBetween(3, 10);
|
||||
for (int i = 1; i <= iters; i++) {
|
||||
Thread thread = executor.getThreadFactory().newThread(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
|
||||
}
|
||||
});
|
||||
assertThat(thread.getName(), equalTo("es_rest_client_sniffer[T#" + i + "]"));
|
||||
assertThat(thread.isDaemon(), is(true));
|
||||
}
|
||||
} finally {
|
||||
defaultScheduler.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Verifies that {@link DefaultScheduler#shutdown()} performs an orderly shutdown of its executor:
 * shutdown, then awaitTermination with a one second timeout, and shutdownNow only when the
 * executor did not terminate within that timeout.
 */
public void testDefaultSchedulerShutdown() throws Exception {
    ScheduledThreadPoolExecutor executor = mock(ScheduledThreadPoolExecutor.class);
    DefaultScheduler defaultScheduler = new DefaultScheduler(executor);
    // the mock's awaitTermination returns false by default, so shutdownNow must be invoked
    defaultScheduler.shutdown();
    verify(executor).shutdown();
    verify(executor).awaitTermination(1000, TimeUnit.MILLISECONDS);
    verify(executor).shutdownNow();
    verifyNoMoreInteractions(executor);

    // once awaitTermination reports termination, shutdownNow must NOT be called again
    when(executor.awaitTermination(1000, TimeUnit.MILLISECONDS)).thenReturn(true);
    defaultScheduler.shutdown();
    verify(executor, times(2)).shutdown();
    verify(executor, times(2)).awaitTermination(1000, TimeUnit.MILLISECONDS);
    verifyNoMoreInteractions(executor);
}
|
||||
}
|
|
@ -6,7 +6,9 @@ See: https://github.com/elastic/docs
|
|||
Snippets marked with `// CONSOLE` are automatically annotated with "VIEW IN
|
||||
CONSOLE" and "COPY AS CURL" in the documentation and are automatically tested
|
||||
by the command `gradle :docs:check`. To test just the docs from a single page,
|
||||
use e.g. `gradle :docs:check -Dtests.method="*rollover*"`.
|
||||
use e.g. `gradle :docs:check -Dtests.method="\*rollover*"`.
|
||||
|
||||
NOTE: If you have an elasticsearch-extra folder alongside your elasticsearch folder, you must temporarily rename it when you are testing 6.3 or later branches.
|
||||
|
||||
By default each `// CONSOLE` snippet runs as its own isolated test. You can
|
||||
manipulate the test execution in the following ways:
|
||||
|
|
|
@ -0,0 +1,75 @@
|
|||
[[java-rest-high-cluster-get-pipeline]]
|
||||
=== Get Pipeline API
|
||||
|
||||
[[java-rest-high-cluster-get-pipeline-request]]
|
||||
==== Get Pipeline Request
|
||||
|
||||
A `GetPipelineRequest` requires one or more `pipelineIds` to fetch.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-request]
|
||||
--------------------------------------------------
|
||||
<1> The pipeline id to fetch
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-request-masterTimeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`
|
||||
<2> Timeout to connect to the master node as a `String`
|
||||
|
||||
[[java-rest-high-cluster-get-pipeline-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute]
|
||||
--------------------------------------------------
|
||||
<1> Execute the request and get back the response in a GetPipelineResponse object.
|
||||
|
||||
[[java-rest-high-cluster-get-pipeline-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a get pipeline request requires both the `GetPipelineRequest`
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `GetPipelineRequest` to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `GetPipelineResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-cluster-get-pipeline-response]]
|
||||
==== Get Pipeline Response
|
||||
|
||||
The returned `GetPipelineResponse` allows you to retrieve information about the executed
|
||||
operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[get-pipeline-response]
|
||||
--------------------------------------------------
|
||||
<1> Check if a matching pipeline id was found or not.
|
||||
<2> Get the list of pipelines found as a list of `PipelineConfig` objects.
|
||||
<3> Get the individual configuration of each pipeline as a `Map<String, Object>`.
|
|
@ -107,9 +107,11 @@ The Java High Level REST Client supports the following Cluster APIs:
|
|||
|
||||
* <<java-rest-high-cluster-put-settings>>
|
||||
* <<java-rest-high-cluster-put-pipeline>>
|
||||
* <<java-rest-high-cluster-get-pipeline>>
|
||||
|
||||
include::cluster/put_settings.asciidoc[]
|
||||
include::cluster/put_pipeline.asciidoc[]
|
||||
include::cluster/get_pipeline.asciidoc[]
|
||||
|
||||
== Snapshot APIs
|
||||
|
||||
|
|
|
@ -103,6 +103,12 @@ The following parameters are accepted by `keyword` fields:
|
|||
How to pre-process the keyword prior to indexing. Defaults to `null`,
|
||||
meaning the keyword is kept as-is.
|
||||
|
||||
`split_queries_on_whitespace`::
|
||||
|
||||
Whether <<full-text-queries,full text queries>> should split the input on whitespace
|
||||
when building a query for this field.
|
||||
Accepts `true` or `false` (default).
|
||||
|
||||
NOTE: Indexes imported from 2.x do not support `keyword`. Instead they will
|
||||
attempt to downgrade `keyword` into `string`. This allows you to merge modern
|
||||
mappings with legacy mappings. Long lived indexes will have to be recreated
|
||||
|
|
|
@ -84,6 +84,10 @@ PUT place_path_category
|
|||
NOTE: Adding context mappings increases the index size for completion field. The completion index
|
||||
is entirely heap resident, you can monitor the completion field index size using <<indices-stats>>.
|
||||
|
||||
NOTE: deprecated[7.0.0, Indexing a suggestion without context on a context enabled completion field is deprecated
|
||||
and will be removed in the next major release. If you want to index a suggestion that matches all contexts you should
|
||||
add a special context for it.]
|
||||
|
||||
[[suggester-context-category]]
|
||||
[float]
|
||||
==== Category Context
|
||||
|
@ -156,9 +160,9 @@ POST place/_search?pretty
|
|||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
NOTE: When no categories are provided at query-time, all indexed documents are considered.
|
||||
Querying with no categories on a category enabled completion field should be avoided, as it
|
||||
will degrade search performance.
|
||||
Note: deprecated[7.0.0, When no categories are provided at query-time, all indexed documents are considered.
|
||||
Querying with no categories on a category enabled completion field is deprecated and will be removed in the next major release
|
||||
as it degrades search performance considerably.]
|
||||
|
||||
Suggestions with certain categories can be boosted higher than others.
|
||||
The following filters suggestions by categories and additionally boosts
|
||||
|
|
|
@ -52,8 +52,7 @@ import static org.elasticsearch.ingest.common.IngestCommonPlugin.GROK_PATTERNS;
|
|||
import static org.elasticsearch.rest.RestRequest.Method.GET;
|
||||
import static org.elasticsearch.rest.RestStatus.OK;
|
||||
|
||||
public class GrokProcessorGetAction extends Action<GrokProcessorGetAction.Request,
|
||||
GrokProcessorGetAction.Response, GrokProcessorGetAction.RequestBuilder> {
|
||||
public class GrokProcessorGetAction extends Action<GrokProcessorGetAction.Request, GrokProcessorGetAction.Response> {
|
||||
|
||||
public static final GrokProcessorGetAction INSTANCE = new GrokProcessorGetAction();
|
||||
public static final String NAME = "cluster:admin/ingest/processor/grok/get";
|
||||
|
@ -62,11 +61,6 @@ public class GrokProcessorGetAction extends Action<GrokProcessorGetAction.Reques
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new RequestBuilder(client);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Response newResponse() {
|
||||
return new Response(null);
|
||||
|
@ -79,7 +73,7 @@ public class GrokProcessorGetAction extends Action<GrokProcessorGetAction.Reques
|
|||
}
|
||||
}
|
||||
|
||||
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
|
||||
public static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
|
||||
public RequestBuilder(ElasticsearchClient client) {
|
||||
super(client, GrokProcessorGetAction.INSTANCE, new Request());
|
||||
}
|
||||
|
|
|
@ -20,10 +20,8 @@
|
|||
package org.elasticsearch.script.mustache;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class MultiSearchTemplateAction
|
||||
extends Action<MultiSearchTemplateRequest, MultiSearchTemplateResponse, MultiSearchTemplateRequestBuilder> {
|
||||
public class MultiSearchTemplateAction extends Action<MultiSearchTemplateRequest, MultiSearchTemplateResponse> {
|
||||
|
||||
public static final MultiSearchTemplateAction INSTANCE = new MultiSearchTemplateAction();
|
||||
public static final String NAME = "indices:data/read/msearch/template";
|
||||
|
@ -36,9 +34,4 @@ public class MultiSearchTemplateAction
|
|||
public MultiSearchTemplateResponse newResponse() {
|
||||
return new MultiSearchTemplateResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public MultiSearchTemplateRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new MultiSearchTemplateRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,7 +24,7 @@ import org.elasticsearch.action.support.IndicesOptions;
|
|||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class MultiSearchTemplateRequestBuilder
|
||||
extends ActionRequestBuilder<MultiSearchTemplateRequest, MultiSearchTemplateResponse, MultiSearchTemplateRequestBuilder> {
|
||||
extends ActionRequestBuilder<MultiSearchTemplateRequest, MultiSearchTemplateResponse> {
|
||||
|
||||
protected MultiSearchTemplateRequestBuilder(ElasticsearchClient client, MultiSearchTemplateAction action) {
|
||||
super(client, action, new MultiSearchTemplateRequest());
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.script.mustache;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class SearchTemplateAction extends Action<SearchTemplateRequest, SearchTemplateResponse, SearchTemplateRequestBuilder> {
|
||||
public class SearchTemplateAction extends Action<SearchTemplateRequest, SearchTemplateResponse> {
|
||||
|
||||
public static final SearchTemplateAction INSTANCE = new SearchTemplateAction();
|
||||
public static final String NAME = "indices:data/read/search/template";
|
||||
|
@ -31,11 +30,6 @@ public class SearchTemplateAction extends Action<SearchTemplateRequest, SearchTe
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SearchTemplateRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new SearchTemplateRequestBuilder(client, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SearchTemplateResponse newResponse() {
|
||||
return new SearchTemplateResponse();
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.elasticsearch.script.ScriptType;
|
|||
import java.util.Map;
|
||||
|
||||
public class SearchTemplateRequestBuilder
|
||||
extends ActionRequestBuilder<SearchTemplateRequest, SearchTemplateResponse, SearchTemplateRequestBuilder> {
|
||||
extends ActionRequestBuilder<SearchTemplateRequest, SearchTemplateResponse> {
|
||||
|
||||
SearchTemplateRequestBuilder(ElasticsearchClient client, SearchTemplateAction action) {
|
||||
super(client, action, new SearchTemplateRequest());
|
||||
|
|
|
@ -62,8 +62,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET;
|
|||
import static org.elasticsearch.rest.RestRequest.Method.POST;
|
||||
import static org.elasticsearch.rest.RestStatus.OK;
|
||||
|
||||
public class PainlessExecuteAction extends Action<PainlessExecuteAction.Request, PainlessExecuteAction.Response,
|
||||
PainlessExecuteAction.RequestBuilder> {
|
||||
public class PainlessExecuteAction extends Action<PainlessExecuteAction.Request, PainlessExecuteAction.Response> {
|
||||
|
||||
static final PainlessExecuteAction INSTANCE = new PainlessExecuteAction();
|
||||
private static final String NAME = "cluster:admin/scripts/painless/execute";
|
||||
|
@ -72,11 +71,6 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Request,
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new RequestBuilder(client);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Response newResponse() {
|
||||
return new Response();
|
||||
|
@ -201,7 +195,7 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Request,
|
|||
|
||||
}
|
||||
|
||||
public static class RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder> {
|
||||
public static class RequestBuilder extends ActionRequestBuilder<Request, Response> {
|
||||
|
||||
RequestBuilder(ElasticsearchClient client) {
|
||||
super(client, INSTANCE, new Request());
|
||||
|
|
|
@ -163,14 +163,6 @@ public class FeatureFieldMapper extends FieldMapper {
|
|||
return new TermQuery(new Term("_feature", name()));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query nullValueQuery() {
|
||||
if (nullValue() == null) {
|
||||
return null;
|
||||
}
|
||||
return termQuery(nullValue(), null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
|
||||
failIfNoDocValues();
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.index.rankeval;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Action for explaining evaluating search ranking results.
|
||||
*/
|
||||
public class RankEvalAction extends Action<RankEvalRequest, RankEvalResponse, RankEvalRequestBuilder> {
|
||||
public class RankEvalAction extends Action<RankEvalRequest, RankEvalResponse> {
|
||||
|
||||
public static final RankEvalAction INSTANCE = new RankEvalAction();
|
||||
public static final String NAME = "indices:data/read/rank_eval";
|
||||
|
@ -34,11 +33,6 @@ public class RankEvalAction extends Action<RankEvalRequest, RankEvalResponse, Ra
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RankEvalRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new RankEvalRequestBuilder(client, this, new RankEvalRequest());
|
||||
}
|
||||
|
||||
@Override
|
||||
public RankEvalResponse newResponse() {
|
||||
return new RankEvalResponse();
|
||||
|
|
|
@ -23,9 +23,9 @@ import org.elasticsearch.action.Action;
|
|||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class RankEvalRequestBuilder extends ActionRequestBuilder<RankEvalRequest, RankEvalResponse, RankEvalRequestBuilder> {
|
||||
public class RankEvalRequestBuilder extends ActionRequestBuilder<RankEvalRequest, RankEvalResponse> {
|
||||
|
||||
public RankEvalRequestBuilder(ElasticsearchClient client, Action<RankEvalRequest, RankEvalResponse, RankEvalRequestBuilder> action,
|
||||
public RankEvalRequestBuilder(ElasticsearchClient client, Action<RankEvalRequest, RankEvalResponse> action,
|
||||
RankEvalRequest request) {
|
||||
super(client, action, request);
|
||||
}
|
||||
|
|
|
@ -61,7 +61,7 @@ class BulkByScrollParallelizationHelper {
|
|||
static <Request extends AbstractBulkByScrollRequest<Request>> void startSlicedAction(
|
||||
Request request,
|
||||
BulkByScrollTask task,
|
||||
Action<Request, BulkByScrollResponse, ?> action,
|
||||
Action<Request, BulkByScrollResponse> action,
|
||||
ActionListener<BulkByScrollResponse> listener,
|
||||
Client client,
|
||||
DiscoveryNode node,
|
||||
|
@ -85,7 +85,7 @@ class BulkByScrollParallelizationHelper {
|
|||
private static <Request extends AbstractBulkByScrollRequest<Request>> void sliceConditionally(
|
||||
Request request,
|
||||
BulkByScrollTask task,
|
||||
Action<Request, BulkByScrollResponse, ?> action,
|
||||
Action<Request, BulkByScrollResponse> action,
|
||||
ActionListener<BulkByScrollResponse> listener,
|
||||
Client client,
|
||||
DiscoveryNode node,
|
||||
|
@ -118,7 +118,7 @@ class BulkByScrollParallelizationHelper {
|
|||
|
||||
private static <Request extends AbstractBulkByScrollRequest<Request>> void sendSubRequests(
|
||||
Client client,
|
||||
Action<Request, BulkByScrollResponse, ?> action,
|
||||
Action<Request, BulkByScrollResponse> action,
|
||||
String localNodeId,
|
||||
BulkByScrollTask task,
|
||||
Request request,
|
||||
|
|
|
@ -21,9 +21,8 @@ package org.elasticsearch.index.reindex;
|
|||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class RethrottleAction extends Action<RethrottleRequest, ListTasksResponse, RethrottleRequestBuilder> {
|
||||
public class RethrottleAction extends Action<RethrottleRequest, ListTasksResponse> {
|
||||
public static final RethrottleAction INSTANCE = new RethrottleAction();
|
||||
public static final String NAME = "cluster:admin/reindex/rethrottle";
|
||||
|
||||
|
@ -31,11 +30,6 @@ public class RethrottleAction extends Action<RethrottleRequest, ListTasksRespons
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RethrottleRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new RethrottleRequestBuilder(client, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ListTasksResponse newResponse() {
|
||||
return new ListTasksResponse();
|
||||
|
|
|
@ -29,7 +29,7 @@ import org.elasticsearch.client.ElasticsearchClient;
|
|||
*/
|
||||
public class RethrottleRequestBuilder extends TasksRequestBuilder<RethrottleRequest, ListTasksResponse, RethrottleRequestBuilder> {
|
||||
public RethrottleRequestBuilder(ElasticsearchClient client,
|
||||
Action<RethrottleRequest, ListTasksResponse, RethrottleRequestBuilder> action) {
|
||||
Action<RethrottleRequest, ListTasksResponse> action) {
|
||||
super(client, action, new RethrottleRequest());
|
||||
}
|
||||
|
||||
|
|
|
@ -27,10 +27,12 @@ import org.elasticsearch.index.query.QueryBuilders;
|
|||
import org.elasticsearch.index.reindex.BulkByScrollResponse;
|
||||
import org.elasticsearch.index.reindex.BulkByScrollTask;
|
||||
import org.elasticsearch.index.reindex.DeleteByQueryAction;
|
||||
import org.elasticsearch.index.reindex.DeleteByQueryRequestBuilder;
|
||||
import org.elasticsearch.index.reindex.ReindexAction;
|
||||
import org.elasticsearch.index.reindex.ReindexRequest;
|
||||
import org.elasticsearch.index.reindex.ReindexRequestBuilder;
|
||||
import org.elasticsearch.index.reindex.RethrottleAction;
|
||||
import org.elasticsearch.index.reindex.RethrottleRequestBuilder;
|
||||
import org.elasticsearch.index.reindex.UpdateByQueryAction;
|
||||
import org.elasticsearch.index.reindex.UpdateByQueryRequestBuilder;
|
||||
import org.elasticsearch.script.Script;
|
||||
|
@ -47,7 +49,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
public void reindex() {
|
||||
Client client = client();
|
||||
// tag::reindex1
|
||||
BulkByScrollResponse response = ReindexAction.INSTANCE.newRequestBuilder(client)
|
||||
BulkByScrollResponse response = new ReindexRequestBuilder(client, ReindexAction.INSTANCE)
|
||||
.destination("target_index")
|
||||
.filter(QueryBuilders.matchQuery("category", "xzy")) // <1>
|
||||
.get();
|
||||
|
@ -58,14 +60,14 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
Client client = client();
|
||||
{
|
||||
// tag::update-by-query
|
||||
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
|
||||
UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE);
|
||||
updateByQuery.source("source_index").abortOnVersionConflict(false);
|
||||
BulkByScrollResponse response = updateByQuery.get();
|
||||
// end::update-by-query
|
||||
}
|
||||
{
|
||||
// tag::update-by-query-filter
|
||||
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
|
||||
UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE);
|
||||
updateByQuery.source("source_index")
|
||||
.filter(QueryBuilders.termQuery("level", "awesome"))
|
||||
.size(1000)
|
||||
|
@ -75,7 +77,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
}
|
||||
{
|
||||
// tag::update-by-query-size
|
||||
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
|
||||
UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE);
|
||||
updateByQuery.source("source_index")
|
||||
.source().setSize(500);
|
||||
BulkByScrollResponse response = updateByQuery.get();
|
||||
|
@ -83,7 +85,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
}
|
||||
{
|
||||
// tag::update-by-query-sort
|
||||
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
|
||||
UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE);
|
||||
updateByQuery.source("source_index").size(100)
|
||||
.source().addSort("cat", SortOrder.DESC);
|
||||
BulkByScrollResponse response = updateByQuery.get();
|
||||
|
@ -91,7 +93,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
}
|
||||
{
|
||||
// tag::update-by-query-script
|
||||
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
|
||||
UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE);
|
||||
updateByQuery.source("source_index")
|
||||
.script(new Script(
|
||||
ScriptType.INLINE,
|
||||
|
@ -108,21 +110,21 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
}
|
||||
{
|
||||
// tag::update-by-query-multi-index
|
||||
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
|
||||
UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE);
|
||||
updateByQuery.source("foo", "bar").source().setTypes("a", "b");
|
||||
BulkByScrollResponse response = updateByQuery.get();
|
||||
// end::update-by-query-multi-index
|
||||
}
|
||||
{
|
||||
// tag::update-by-query-routing
|
||||
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
|
||||
UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE);
|
||||
updateByQuery.source().setRouting("cat");
|
||||
BulkByScrollResponse response = updateByQuery.get();
|
||||
// end::update-by-query-routing
|
||||
}
|
||||
{
|
||||
// tag::update-by-query-pipeline
|
||||
UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client);
|
||||
UpdateByQueryRequestBuilder updateByQuery = new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE);
|
||||
updateByQuery.setPipeline("hurray");
|
||||
BulkByScrollResponse response = updateByQuery.get();
|
||||
// end::update-by-query-pipeline
|
||||
|
@ -156,7 +158,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
{
|
||||
TaskId taskId = null;
|
||||
// tag::update-by-query-rethrottle
|
||||
RethrottleAction.INSTANCE.newRequestBuilder(client)
|
||||
new RethrottleRequestBuilder(client, RethrottleAction.INSTANCE)
|
||||
.setTaskId(taskId)
|
||||
.setRequestsPerSecond(2.0f)
|
||||
.get();
|
||||
|
@ -167,7 +169,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
public void deleteByQuery() {
|
||||
Client client = client();
|
||||
// tag::delete-by-query-sync
|
||||
BulkByScrollResponse response = DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
|
||||
BulkByScrollResponse response = new DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE)
|
||||
.filter(QueryBuilders.matchQuery("gender", "male")) // <1>
|
||||
.source("persons") // <2>
|
||||
.get(); // <3>
|
||||
|
@ -175,7 +177,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
|
|||
// end::delete-by-query-sync
|
||||
|
||||
// tag::delete-by-query-async
|
||||
DeleteByQueryAction.INSTANCE.newRequestBuilder(client)
|
||||
new DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE)
|
||||
.filter(QueryBuilders.matchQuery("gender", "male")) // <1>
|
||||
.source("persons") // <2>
|
||||
.execute(new ActionListener<BulkByScrollResponse>() { // <3>
|
||||
|
|
|
@ -744,8 +744,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
|
|||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
protected <Request extends ActionRequest, Response extends ActionResponse,
|
||||
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
|
||||
Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
|
||||
RequestBuilder extends ActionRequestBuilder<Request, Response>> void doExecute(
|
||||
Action<Request, Response> action, Request request, ActionListener<Response> listener) {
|
||||
if (false == expectedHeaders.equals(threadPool().getThreadContext().getHeaders())) {
|
||||
listener.onFailure(
|
||||
new RuntimeException("Expected " + expectedHeaders + " but got " + threadPool().getThreadContext().getHeaders()));
|
||||
|
|
|
@ -109,13 +109,13 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
|
||||
public void testReindexFromRemoteWithAuthentication() throws Exception {
|
||||
ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest")
|
||||
ReindexRequestBuilder request = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("source").destination("dest")
|
||||
.setRemoteInfo(newRemoteInfo("Aladdin", "open sesame", emptyMap()));
|
||||
assertThat(request.get(), matcher().created(1));
|
||||
}
|
||||
|
||||
public void testReindexSendsHeaders() throws Exception {
|
||||
ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest")
|
||||
ReindexRequestBuilder request = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("source").destination("dest")
|
||||
.setRemoteInfo(newRemoteInfo(null, null, singletonMap(TestFilter.EXAMPLE_HEADER, "doesn't matter")));
|
||||
ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> request.get());
|
||||
assertEquals(RestStatus.BAD_REQUEST, e.status());
|
||||
|
@ -123,7 +123,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
|
||||
public void testReindexWithoutAuthenticationWhenRequired() throws Exception {
|
||||
ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest")
|
||||
ReindexRequestBuilder request = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("source").destination("dest")
|
||||
.setRemoteInfo(newRemoteInfo(null, null, emptyMap()));
|
||||
ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> request.get());
|
||||
assertEquals(RestStatus.UNAUTHORIZED, e.status());
|
||||
|
@ -132,7 +132,7 @@ public class ReindexFromRemoteWithAuthTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
|
||||
public void testReindexWithBadAuthentication() throws Exception {
|
||||
ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest")
|
||||
ReindexRequestBuilder request = new ReindexRequestBuilder(client(), ReindexAction.INSTANCE).source("source").destination("dest")
|
||||
.setRemoteInfo(newRemoteInfo("junk", "auth", emptyMap()));
|
||||
ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> request.get());
|
||||
assertThat(e.getMessage(), containsString("\"reason\":\"Bad Authorization\""));
|
||||
|
|
|
@ -47,19 +47,19 @@ public abstract class ReindexTestCase extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
protected ReindexRequestBuilder reindex() {
|
||||
return ReindexAction.INSTANCE.newRequestBuilder(client());
|
||||
return new ReindexRequestBuilder(client(), ReindexAction.INSTANCE);
|
||||
}
|
||||
|
||||
protected UpdateByQueryRequestBuilder updateByQuery() {
|
||||
return UpdateByQueryAction.INSTANCE.newRequestBuilder(client());
|
||||
return new UpdateByQueryRequestBuilder(client(), UpdateByQueryAction.INSTANCE);
|
||||
}
|
||||
|
||||
protected DeleteByQueryRequestBuilder deleteByQuery() {
|
||||
return DeleteByQueryAction.INSTANCE.newRequestBuilder(client());
|
||||
return new DeleteByQueryRequestBuilder(client(), DeleteByQueryAction.INSTANCE);
|
||||
}
|
||||
|
||||
protected RethrottleRequestBuilder rethrottle() {
|
||||
return RethrottleAction.INSTANCE.newRequestBuilder(client());
|
||||
return new RethrottleRequestBuilder(client(), RethrottleAction.INSTANCE);
|
||||
}
|
||||
|
||||
public static BulkIndexByScrollResponseMatcher matcher() {
|
||||
|
|
|
@ -106,7 +106,7 @@ public class RetryTests extends ESIntegTestCase {
|
|||
public void testReindex() throws Exception {
|
||||
testCase(
|
||||
ReindexAction.NAME,
|
||||
client -> ReindexAction.INSTANCE.newRequestBuilder(client).source("source").destination("dest"),
|
||||
client -> new ReindexRequestBuilder(client, ReindexAction.INSTANCE).source("source").destination("dest"),
|
||||
matcher().created(DOC_COUNT));
|
||||
}
|
||||
|
||||
|
@ -127,7 +127,7 @@ public class RetryTests extends ESIntegTestCase {
|
|||
TransportAddress address = masterNode.getHttp().getAddress().publishAddress();
|
||||
RemoteInfo remote = new RemoteInfo("http", address.getAddress(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null,
|
||||
null, emptyMap(), RemoteInfo.DEFAULT_SOCKET_TIMEOUT, RemoteInfo.DEFAULT_CONNECT_TIMEOUT);
|
||||
ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client).source("source").destination("dest")
|
||||
ReindexRequestBuilder request = new ReindexRequestBuilder(client, ReindexAction.INSTANCE).source("source").destination("dest")
|
||||
.setRemoteInfo(remote);
|
||||
return request;
|
||||
};
|
||||
|
@ -135,12 +135,12 @@ public class RetryTests extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
public void testUpdateByQuery() throws Exception {
|
||||
testCase(UpdateByQueryAction.NAME, client -> UpdateByQueryAction.INSTANCE.newRequestBuilder(client).source("source"),
|
||||
testCase(UpdateByQueryAction.NAME, client -> new UpdateByQueryRequestBuilder(client, UpdateByQueryAction.INSTANCE).source("source"),
|
||||
matcher().updated(DOC_COUNT));
|
||||
}
|
||||
|
||||
public void testDeleteByQuery() throws Exception {
|
||||
testCase(DeleteByQueryAction.NAME, client -> DeleteByQueryAction.INSTANCE.newRequestBuilder(client).source("source")
|
||||
testCase(DeleteByQueryAction.NAME, client -> new DeleteByQueryRequestBuilder(client, DeleteByQueryAction.INSTANCE).source("source")
|
||||
.filter(QueryBuilders.matchAllQuery()), matcher().deleted(DOC_COUNT));
|
||||
}
|
||||
|
||||
|
|
|
@ -135,14 +135,6 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
|
|||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query nullValueQuery() {
|
||||
if (nullValue() == null) {
|
||||
return null;
|
||||
}
|
||||
return termQuery(nullValue(), null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) {
|
||||
failIfNoDocValues();
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
---
|
||||
"Search with missing remote index pattern":
|
||||
- do:
|
||||
catch: "request"
|
||||
catch: "missing"
|
||||
search:
|
||||
index: "my_remote_cluster:foo"
|
||||
|
||||
|
@ -34,7 +34,7 @@
|
|||
- match: { aggregations.cluster.buckets.0.doc_count: 6 }
|
||||
|
||||
- do:
|
||||
catch: "request"
|
||||
catch: "missing"
|
||||
search:
|
||||
index: "my_remote_cluster:test_index,my_remote_cluster:foo"
|
||||
body:
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.elasticsearch.action.support.ActionFilter;
|
|||
import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -221,8 +222,10 @@ public class ContextAndHeaderTransportIT extends HttpSmokeTestCase {
|
|||
public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException {
|
||||
final String IRRELEVANT_HEADER = "SomeIrrelevantHeader";
|
||||
Request request = new Request("GET", "/" + queryIndex + "/_search");
|
||||
request.addHeader(CUSTOM_HEADER, randomHeaderValue);
|
||||
request.addHeader(IRRELEVANT_HEADER, randomHeaderValue);
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader(CUSTOM_HEADER, randomHeaderValue);
|
||||
options.addHeader(IRRELEVANT_HEADER, randomHeaderValue);
|
||||
request.setOptions(options);
|
||||
Response response = getRestClient().performRequest(request);
|
||||
assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
|
||||
List<RequestAndHeaders> searchRequests = getRequests(SearchRequest.class);
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.http;
|
||||
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -32,8 +33,10 @@ public class CorsNotSetIT extends HttpSmokeTestCase {
|
|||
public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException {
|
||||
String corsValue = "http://localhost:9200";
|
||||
Request request = new Request("GET", "/");
|
||||
request.addHeader("User-Agent", "Mozilla Bar");
|
||||
request.addHeader("Origin", corsValue);
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("User-Agent", "Mozilla Bar");
|
||||
options.addHeader("Origin", corsValue);
|
||||
request.setOptions(options);
|
||||
Response response = getRestClient().performRequest(request);
|
||||
assertThat(response.getStatusLine().getStatusCode(), is(200));
|
||||
assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue());
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
package org.elasticsearch.http;
|
||||
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.ResponseException;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -55,16 +56,20 @@ public class CorsRegexIT extends HttpSmokeTestCase {
|
|||
{
|
||||
String corsValue = "http://localhost:9200";
|
||||
Request request = new Request("GET", "/");
|
||||
request.addHeader("User-Agent", "Mozilla Bar");
|
||||
request.addHeader("Origin", corsValue);
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("User-Agent", "Mozilla Bar");
|
||||
options.addHeader("Origin", corsValue);
|
||||
request.setOptions(options);
|
||||
Response response = getRestClient().performRequest(request);
|
||||
assertResponseWithOriginHeader(response, corsValue);
|
||||
}
|
||||
{
|
||||
String corsValue = "https://localhost:9201";
|
||||
Request request = new Request("GET", "/");
|
||||
request.addHeader("User-Agent", "Mozilla Bar");
|
||||
request.addHeader("Origin", corsValue);
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("User-Agent", "Mozilla Bar");
|
||||
options.addHeader("Origin", corsValue);
|
||||
request.setOptions(options);
|
||||
Response response = getRestClient().performRequest(request);
|
||||
assertResponseWithOriginHeader(response, corsValue);
|
||||
assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true"));
|
||||
|
@ -73,8 +78,10 @@ public class CorsRegexIT extends HttpSmokeTestCase {
|
|||
|
||||
public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOException {
|
||||
Request request = new Request("GET", "/");
|
||||
request.addHeader("User-Agent", "Mozilla Bar");
|
||||
request.addHeader("Origin", "http://evil-host:9200");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("User-Agent", "Mozilla Bar");
|
||||
options.addHeader("Origin", "http://evil-host:9200");
|
||||
request.setOptions(options);
|
||||
try {
|
||||
getRestClient().performRequest(request);
|
||||
fail("request should have failed");
|
||||
|
@ -88,7 +95,9 @@ public class CorsRegexIT extends HttpSmokeTestCase {
|
|||
|
||||
public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws IOException {
|
||||
Request request = new Request("GET", "/");
|
||||
request.addHeader("User-Agent", "Mozilla Bar");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("User-Agent", "Mozilla Bar");
|
||||
request.setOptions(options);
|
||||
Response response = getRestClient().performRequest(request);
|
||||
assertThat(response.getStatusLine().getStatusCode(), is(200));
|
||||
assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue());
|
||||
|
@ -103,9 +112,11 @@ public class CorsRegexIT extends HttpSmokeTestCase {
|
|||
public void testThatPreFlightRequestWorksOnMatch() throws IOException {
|
||||
String corsValue = "http://localhost:9200";
|
||||
Request request = new Request("OPTIONS", "/");
|
||||
request.addHeader("User-Agent", "Mozilla Bar");
|
||||
request.addHeader("Origin", corsValue);
|
||||
request.addHeader("Access-Control-Request-Method", "GET");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("User-Agent", "Mozilla Bar");
|
||||
options.addHeader("Origin", corsValue);
|
||||
options.addHeader("Access-Control-Request-Method", "GET");
|
||||
request.setOptions(options);
|
||||
Response response = getRestClient().performRequest(request);
|
||||
assertResponseWithOriginHeader(response, corsValue);
|
||||
assertNotNull(response.getHeader("Access-Control-Allow-Methods"));
|
||||
|
@ -114,9 +125,11 @@ public class CorsRegexIT extends HttpSmokeTestCase {
|
|||
public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException {
|
||||
String corsValue = "http://evil-host:9200";
|
||||
Request request = new Request("OPTIONS", "/");
|
||||
request.addHeader("User-Agent", "Mozilla Bar");
|
||||
request.addHeader("Origin", corsValue);
|
||||
request.addHeader("Access-Control-Request-Method", "GET");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("User-Agent", "Mozilla Bar");
|
||||
options.addHeader("Origin", corsValue);
|
||||
options.addHeader("Access-Control-Request-Method", "GET");
|
||||
request.setOptions(options);
|
||||
try {
|
||||
getRestClient().performRequest(request);
|
||||
fail("request should have failed");
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.elasticsearch.http;
|
|||
|
||||
import org.apache.http.HttpHeaders;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
|
||||
|
@ -38,7 +39,9 @@ public class HttpCompressionIT extends ESRestTestCase {
|
|||
|
||||
public void testCompressesResponseIfRequested() throws IOException {
|
||||
Request request = new Request("GET", "/");
|
||||
request.addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING);
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING);
|
||||
request.setOptions(options);
|
||||
Response response = client().performRequest(request);
|
||||
assertEquals(200, response.getStatusLine().getStatusCode());
|
||||
assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING));
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.http;
|
|||
|
||||
import org.apache.http.util.EntityUtils;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.ResponseException;
|
||||
|
||||
|
@ -46,7 +47,9 @@ public class NoHandlerIT extends HttpSmokeTestCase {
|
|||
private void runTestNoHandlerRespectsAcceptHeader(
|
||||
final String accept, final String contentType, final String expect) throws IOException {
|
||||
Request request = new Request("GET", "/foo/bar/baz/qux/quux");
|
||||
request.addHeader("Accept", accept);
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("Accept", accept);
|
||||
request.setOptions(options);
|
||||
final ResponseException e = expectThrows(ResponseException.class,
|
||||
() -> getRestClient().performRequest(request));
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
package org.elasticsearch.http;
|
||||
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.ResponseException;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
|
@ -61,7 +62,9 @@ public class ResponseHeaderPluginIT extends HttpSmokeTestCase {
|
|||
}
|
||||
|
||||
Request request = new Request("GET", "/_protected");
|
||||
request.addHeader("Secret", "password");
|
||||
RequestOptions.Builder options = request.getOptions().toBuilder();
|
||||
options.addHeader("Secret", "password");
|
||||
request.setOptions(options);
|
||||
Response authResponse = getRestClient().performRequest(request);
|
||||
assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200));
|
||||
assertThat(authResponse.getHeader("Secret"), equalTo("granted"));
|
||||
|
|
|
@ -51,9 +51,6 @@ setup:
|
|||
|
||||
---
|
||||
"Verify created repository":
|
||||
- skip:
|
||||
version: "all"
|
||||
reason: AwaitsFix for https://github.com/elastic/elasticsearch/issues/30807
|
||||
- do:
|
||||
snapshot.verify_repository:
|
||||
repository: test_repo_get_2
|
||||
|
|
|
@ -336,16 +336,80 @@ setup:
|
|||
- length: { suggest.result.0.options: 1 }
|
||||
- match: { suggest.result.0.options.0.text: "foo" }
|
||||
|
||||
---
|
||||
"Indexing and Querying without contexts is deprecated":
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: this feature was deprecated in 7.0
|
||||
features: "warnings"
|
||||
|
||||
- do:
|
||||
search:
|
||||
index:
|
||||
index: test
|
||||
type: test
|
||||
id: 1
|
||||
body:
|
||||
suggest_context:
|
||||
input: "foo"
|
||||
contexts:
|
||||
color: "red"
|
||||
suggest_multi_contexts:
|
||||
input: "bar"
|
||||
contexts:
|
||||
color: "blue"
|
||||
|
||||
- do:
|
||||
warnings:
|
||||
- "The ability to index a suggestion with no context on a context enabled completion field is deprecated and will be removed in the next major release."
|
||||
index:
|
||||
index: test
|
||||
type: test
|
||||
id: 2
|
||||
body:
|
||||
suggest_context:
|
||||
input: "foo"
|
||||
|
||||
- do:
|
||||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
warnings:
|
||||
- "The ability to query with no context on a context enabled completion field is deprecated and will be removed in the next major release."
|
||||
search:
|
||||
body:
|
||||
suggest:
|
||||
result:
|
||||
text: "foo"
|
||||
completion:
|
||||
skip_duplicates: true
|
||||
field: suggest_context
|
||||
|
||||
- length: { suggest.result: 1 }
|
||||
- length: { suggest.result.0.options: 1 }
|
||||
- match: { suggest.result.0.options.0.text: "foo" }
|
||||
|
||||
- do:
|
||||
warnings:
|
||||
- "The ability to query with no context on a context enabled completion field is deprecated and will be removed in the next major release."
|
||||
search:
|
||||
body:
|
||||
suggest:
|
||||
result:
|
||||
text: "foo"
|
||||
completion:
|
||||
field: suggest_context
|
||||
contexts: {}
|
||||
|
||||
- length: { suggest.result: 1 }
|
||||
|
||||
- do:
|
||||
warnings:
|
||||
- "The ability to query with no context on a context enabled completion field is deprecated and will be removed in the next major release."
|
||||
search:
|
||||
body:
|
||||
suggest:
|
||||
result:
|
||||
text: "foo"
|
||||
completion:
|
||||
field: suggest_multi_contexts
|
||||
contexts:
|
||||
location: []
|
||||
|
||||
- length: { suggest.result: 1 }
|
||||
|
|
|
@ -19,7 +19,9 @@ setup:
|
|||
"type" : "category"
|
||||
|
||||
- do:
|
||||
bulk:
|
||||
warnings:
|
||||
- "The ability to index a suggestion with no context on a context enabled completion field is deprecated and will be removed in the next major release."
|
||||
bulk:
|
||||
refresh: true
|
||||
index: test
|
||||
type: test
|
||||
|
@ -31,8 +33,14 @@ setup:
|
|||
|
||||
---
|
||||
"Test typed keys parameter for suggesters":
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: queying a context suggester with no context was deprecated in 7.0
|
||||
features: "warnings"
|
||||
|
||||
- do:
|
||||
warnings:
|
||||
- "The ability to query with no context on a context enabled completion field is deprecated and will be removed in the next major release."
|
||||
search:
|
||||
typed_keys: true
|
||||
body:
|
||||
|
|
|
@ -19,12 +19,8 @@
|
|||
|
||||
package org.apache.lucene.queries;
|
||||
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.TermContext;
|
||||
import org.apache.lucene.search.BooleanClause.Occur;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
|
||||
/**
|
||||
* Extended version of {@link CommonTermsQuery} that allows to pass in a
|
||||
|
@ -33,11 +29,8 @@ import org.elasticsearch.index.mapper.MappedFieldType;
|
|||
*/
|
||||
public class ExtendedCommonTermsQuery extends CommonTermsQuery {
|
||||
|
||||
private final MappedFieldType fieldType;
|
||||
|
||||
public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency, MappedFieldType fieldType) {
|
||||
public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency) {
|
||||
super(highFreqOccur, lowFreqOccur, maxTermFrequency);
|
||||
this.fieldType = fieldType;
|
||||
}
|
||||
|
||||
private String lowFreqMinNumShouldMatchSpec;
|
||||
|
@ -80,16 +73,4 @@ public class ExtendedCommonTermsQuery extends CommonTermsQuery {
|
|||
return this.maxTermFrequency;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Query newTermQuery(Term term, TermContext context) {
|
||||
if (fieldType == null) {
|
||||
return super.newTermQuery(term, context);
|
||||
}
|
||||
final Query query = fieldType.queryStringTermQuery(term);
|
||||
if (query == null) {
|
||||
return super.newTermQuery(term, context);
|
||||
} else {
|
||||
return query;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -269,7 +269,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Retrieve the innermost cause of this exception, if none, returns the current exception.
|
||||
*/
|
||||
|
@ -292,7 +291,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
|
|||
out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString);
|
||||
out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString);
|
||||
} else {
|
||||
HashMap<String, List<String>> finalHeaders = new HashMap<>(headers.size() + metadata.size());
|
||||
Map<String, List<String>> finalHeaders = new HashMap<>(headers.size() + metadata.size());
|
||||
finalHeaders.putAll(headers);
|
||||
finalHeaders.putAll(metadata);
|
||||
out.writeMapOfLists(finalHeaders, StreamOutput::writeString, StreamOutput::writeString);
|
||||
|
|
|
@ -19,20 +19,13 @@
|
|||
|
||||
package org.elasticsearch.action;
|
||||
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Base action. Supports building the <code>Request</code> through a <code>RequestBuilder</code>.
|
||||
*/
|
||||
public abstract class Action<Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
|
||||
public abstract class Action<Request extends ActionRequest, Response extends ActionResponse>
|
||||
extends GenericAction<Request, Response> {
|
||||
|
||||
protected Action(String name) {
|
||||
super(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new request builder given the client provided as argument
|
||||
*/
|
||||
public abstract RequestBuilder newRequestBuilder(ElasticsearchClient client);
|
||||
}
|
||||
|
|
|
@ -24,14 +24,13 @@ import org.elasticsearch.common.unit.TimeValue;
|
|||
|
||||
import java.util.Objects;
|
||||
|
||||
public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse,
|
||||
RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> {
|
||||
public abstract class ActionRequestBuilder<Request extends ActionRequest, Response extends ActionResponse> {
|
||||
|
||||
protected final Action<Request, Response, RequestBuilder> action;
|
||||
protected final Action<Request, Response> action;
|
||||
protected final Request request;
|
||||
protected final ElasticsearchClient client;
|
||||
|
||||
protected ActionRequestBuilder(ElasticsearchClient client, Action<Request, Response, RequestBuilder> action, Request request) {
|
||||
protected ActionRequestBuilder(ElasticsearchClient client, Action<Request, Response> action, Request request) {
|
||||
Objects.requireNonNull(action, "action must not be null");
|
||||
this.action = action;
|
||||
this.request = request;
|
||||
|
|
|
@ -22,6 +22,7 @@ package org.elasticsearch.action;
|
|||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
|
@ -48,4 +49,9 @@ public class FailedNodeException extends ElasticsearchException {
|
|||
super.writeTo(out);
|
||||
out.writeOptionalString(nodeId);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void metadataToXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.field("node_id", nodeId);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,9 +25,7 @@ import org.elasticsearch.client.ElasticsearchClient;
|
|||
/**
|
||||
* Action for explaining shard allocation for a shard in the cluster
|
||||
*/
|
||||
public class ClusterAllocationExplainAction extends Action<ClusterAllocationExplainRequest,
|
||||
ClusterAllocationExplainResponse,
|
||||
ClusterAllocationExplainRequestBuilder> {
|
||||
public class ClusterAllocationExplainAction extends Action<ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> {
|
||||
|
||||
public static final ClusterAllocationExplainAction INSTANCE = new ClusterAllocationExplainAction();
|
||||
public static final String NAME = "cluster:monitor/allocation/explain";
|
||||
|
@ -40,9 +38,4 @@ public class ClusterAllocationExplainAction extends Action<ClusterAllocationExpl
|
|||
public ClusterAllocationExplainResponse newResponse() {
|
||||
return new ClusterAllocationExplainResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterAllocationExplainRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ClusterAllocationExplainRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.health;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class ClusterHealthAction extends Action<ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> {
|
||||
public class ClusterHealthAction extends Action<ClusterHealthRequest, ClusterHealthResponse> {
|
||||
|
||||
public static final ClusterHealthAction INSTANCE = new ClusterHealthAction();
|
||||
public static final String NAME = "cluster:monitor/health";
|
||||
|
@ -35,9 +34,4 @@ public class ClusterHealthAction extends Action<ClusterHealthRequest, ClusterHea
|
|||
public ClusterHealthResponse newResponse() {
|
||||
return new ClusterHealthResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterHealthRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ClusterHealthRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.node.hotthreads;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class NodesHotThreadsAction extends Action<NodesHotThreadsRequest, NodesHotThreadsResponse, NodesHotThreadsRequestBuilder> {
|
||||
public class NodesHotThreadsAction extends Action<NodesHotThreadsRequest, NodesHotThreadsResponse> {
|
||||
|
||||
public static final NodesHotThreadsAction INSTANCE = new NodesHotThreadsAction();
|
||||
public static final String NAME = "cluster:monitor/nodes/hot_threads";
|
||||
|
@ -35,9 +34,4 @@ public class NodesHotThreadsAction extends Action<NodesHotThreadsRequest, NodesH
|
|||
public NodesHotThreadsResponse newResponse() {
|
||||
return new NodesHotThreadsResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public NodesHotThreadsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new NodesHotThreadsRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.node.info;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class NodesInfoAction extends Action<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
|
||||
public class NodesInfoAction extends Action<NodesInfoRequest, NodesInfoResponse> {
|
||||
|
||||
public static final NodesInfoAction INSTANCE = new NodesInfoAction();
|
||||
public static final String NAME = "cluster:monitor/nodes/info";
|
||||
|
@ -35,9 +34,4 @@ public class NodesInfoAction extends Action<NodesInfoRequest, NodesInfoResponse,
|
|||
public NodesInfoResponse newResponse() {
|
||||
return new NodesInfoResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public NodesInfoRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new NodesInfoRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.node.stats;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class NodesStatsAction extends Action<NodesStatsRequest, NodesStatsResponse, NodesStatsRequestBuilder> {
|
||||
public class NodesStatsAction extends Action<NodesStatsRequest, NodesStatsResponse> {
|
||||
|
||||
public static final NodesStatsAction INSTANCE = new NodesStatsAction();
|
||||
public static final String NAME = "cluster:monitor/nodes/stats";
|
||||
|
@ -35,9 +34,4 @@ public class NodesStatsAction extends Action<NodesStatsRequest, NodesStatsRespon
|
|||
public NodesStatsResponse newResponse() {
|
||||
return new NodesStatsResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public NodesStatsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new NodesStatsRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Action for cancelling running tasks
|
||||
*/
|
||||
public class CancelTasksAction extends Action<CancelTasksRequest, CancelTasksResponse, CancelTasksRequestBuilder> {
|
||||
public class CancelTasksAction extends Action<CancelTasksRequest, CancelTasksResponse> {
|
||||
|
||||
public static final CancelTasksAction INSTANCE = new CancelTasksAction();
|
||||
public static final String NAME = "cluster:admin/tasks/cancel";
|
||||
|
@ -38,9 +37,4 @@ public class CancelTasksAction extends Action<CancelTasksRequest, CancelTasksRes
|
|||
public CancelTasksResponse newResponse() {
|
||||
return new CancelTasksResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public CancelTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new CancelTasksRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.node.tasks.get;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Action for retrieving a list of currently running tasks
|
||||
*/
|
||||
public class GetTaskAction extends Action<GetTaskRequest, GetTaskResponse, GetTaskRequestBuilder> {
|
||||
public class GetTaskAction extends Action<GetTaskRequest, GetTaskResponse> {
|
||||
|
||||
public static final GetTaskAction INSTANCE = new GetTaskAction();
|
||||
public static final String NAME = "cluster:monitor/task/get";
|
||||
|
@ -38,9 +37,4 @@ public class GetTaskAction extends Action<GetTaskRequest, GetTaskResponse, GetTa
|
|||
public GetTaskResponse newResponse() {
|
||||
return new GetTaskResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetTaskRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new GetTaskRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@ import org.elasticsearch.tasks.TaskId;
|
|||
/**
|
||||
* Builder for the request to retrieve the list of tasks running on the specified nodes
|
||||
*/
|
||||
public class GetTaskRequestBuilder extends ActionRequestBuilder<GetTaskRequest, GetTaskResponse, GetTaskRequestBuilder> {
|
||||
public class GetTaskRequestBuilder extends ActionRequestBuilder<GetTaskRequest, GetTaskResponse> {
|
||||
public GetTaskRequestBuilder(ElasticsearchClient client, GetTaskAction action) {
|
||||
super(client, action, new GetTaskRequest());
|
||||
}
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.node.tasks.list;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Action for retrieving a list of currently running tasks
|
||||
*/
|
||||
public class ListTasksAction extends Action<ListTasksRequest, ListTasksResponse, ListTasksRequestBuilder> {
|
||||
public class ListTasksAction extends Action<ListTasksRequest, ListTasksResponse> {
|
||||
|
||||
public static final ListTasksAction INSTANCE = new ListTasksAction();
|
||||
public static final String NAME = "cluster:monitor/tasks/lists";
|
||||
|
@ -38,9 +37,4 @@ public class ListTasksAction extends Action<ListTasksRequest, ListTasksResponse,
|
|||
public ListTasksResponse newResponse() {
|
||||
return new ListTasksResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ListTasksRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ListTasksRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -266,6 +266,6 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return Strings.toString(this);
|
||||
return Strings.toString(this, true, true);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.node.usage;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class NodesUsageAction extends Action<NodesUsageRequest, NodesUsageResponse, NodesUsageRequestBuilder> {
|
||||
public class NodesUsageAction extends Action<NodesUsageRequest, NodesUsageResponse> {
|
||||
|
||||
public static final NodesUsageAction INSTANCE = new NodesUsageAction();
|
||||
public static final String NAME = "cluster:monitor/nodes/usage";
|
||||
|
@ -31,11 +30,6 @@ public class NodesUsageAction extends Action<NodesUsageRequest, NodesUsageRespon
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public NodesUsageRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new NodesUsageRequestBuilder(client, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public NodesUsageResponse newResponse() {
|
||||
return new NodesUsageResponse();
|
||||
|
|
|
@ -26,8 +26,7 @@ import org.elasticsearch.client.ElasticsearchClient;
|
|||
public class NodesUsageRequestBuilder
|
||||
extends NodesOperationRequestBuilder<NodesUsageRequest, NodesUsageResponse, NodesUsageRequestBuilder> {
|
||||
|
||||
public NodesUsageRequestBuilder(ElasticsearchClient client,
|
||||
Action<NodesUsageRequest, NodesUsageResponse, NodesUsageRequestBuilder> action) {
|
||||
public NodesUsageRequestBuilder(ElasticsearchClient client, Action<NodesUsageRequest, NodesUsageResponse> action) {
|
||||
super(client, action, new NodesUsageRequest());
|
||||
}
|
||||
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.remote;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public final class RemoteInfoAction extends Action<RemoteInfoRequest, RemoteInfoResponse, RemoteInfoRequestBuilder> {
|
||||
public final class RemoteInfoAction extends Action<RemoteInfoRequest, RemoteInfoResponse> {
|
||||
|
||||
public static final String NAME = "cluster:monitor/remote/info";
|
||||
public static final RemoteInfoAction INSTANCE = new RemoteInfoAction();
|
||||
|
@ -31,11 +30,6 @@ public final class RemoteInfoAction extends Action<RemoteInfoRequest, RemoteInfo
|
|||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RemoteInfoRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new RemoteInfoRequestBuilder(client, INSTANCE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RemoteInfoResponse newResponse() {
|
||||
return new RemoteInfoResponse();
|
||||
|
|
|
@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.cluster.remote;
|
|||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public final class RemoteInfoRequestBuilder extends ActionRequestBuilder<RemoteInfoRequest, RemoteInfoResponse, RemoteInfoRequestBuilder> {
|
||||
public final class RemoteInfoRequestBuilder extends ActionRequestBuilder<RemoteInfoRequest, RemoteInfoResponse> {
|
||||
|
||||
public RemoteInfoRequestBuilder(ElasticsearchClient client, RemoteInfoAction action) {
|
||||
super(client, action, new RemoteInfoRequest());
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.repositories.delete;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Unregister repository action
|
||||
*/
|
||||
public class DeleteRepositoryAction extends Action<DeleteRepositoryRequest, DeleteRepositoryResponse, DeleteRepositoryRequestBuilder> {
|
||||
public class DeleteRepositoryAction extends Action<DeleteRepositoryRequest, DeleteRepositoryResponse> {
|
||||
|
||||
public static final DeleteRepositoryAction INSTANCE = new DeleteRepositoryAction();
|
||||
public static final String NAME = "cluster:admin/repository/delete";
|
||||
|
@ -38,10 +37,5 @@ public class DeleteRepositoryAction extends Action<DeleteRepositoryRequest, Dele
|
|||
public DeleteRepositoryResponse newResponse() {
|
||||
return new DeleteRepositoryResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DeleteRepositoryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new DeleteRepositoryRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.repositories.get;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Get repositories action
|
||||
*/
|
||||
public class GetRepositoriesAction extends Action<GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder> {
|
||||
public class GetRepositoriesAction extends Action<GetRepositoriesRequest, GetRepositoriesResponse> {
|
||||
|
||||
public static final GetRepositoriesAction INSTANCE = new GetRepositoriesAction();
|
||||
public static final String NAME = "cluster:admin/repository/get";
|
||||
|
@ -38,10 +37,5 @@ public class GetRepositoriesAction extends Action<GetRepositoriesRequest, GetRep
|
|||
public GetRepositoriesResponse newResponse() {
|
||||
return new GetRepositoriesResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetRepositoriesRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new GetRepositoriesRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.repositories.put;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Register repository action
|
||||
*/
|
||||
public class PutRepositoryAction extends Action<PutRepositoryRequest, PutRepositoryResponse, PutRepositoryRequestBuilder> {
|
||||
public class PutRepositoryAction extends Action<PutRepositoryRequest, PutRepositoryResponse> {
|
||||
|
||||
public static final PutRepositoryAction INSTANCE = new PutRepositoryAction();
|
||||
public static final String NAME = "cluster:admin/repository/put";
|
||||
|
@ -38,10 +37,5 @@ public class PutRepositoryAction extends Action<PutRepositoryRequest, PutReposit
|
|||
public PutRepositoryResponse newResponse() {
|
||||
return new PutRepositoryResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public PutRepositoryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new PutRepositoryRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -73,7 +73,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeAction<V
|
|||
if (verifyResponse.failed()) {
|
||||
listener.onFailure(new RepositoryVerificationException(request.name(), verifyResponse.failureDescription()));
|
||||
} else {
|
||||
listener.onResponse(new VerifyRepositoryResponse(clusterService.getClusterName(), verifyResponse.nodes()));
|
||||
listener.onResponse(new VerifyRepositoryResponse(verifyResponse.nodes()));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.repositories.verify;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Unregister repository action
|
||||
*/
|
||||
public class VerifyRepositoryAction extends Action<VerifyRepositoryRequest, VerifyRepositoryResponse, VerifyRepositoryRequestBuilder> {
|
||||
public class VerifyRepositoryAction extends Action<VerifyRepositoryRequest, VerifyRepositoryResponse> {
|
||||
|
||||
public static final VerifyRepositoryAction INSTANCE = new VerifyRepositoryAction();
|
||||
public static final String NAME = "cluster:admin/repository/verify";
|
||||
|
@ -38,10 +37,5 @@ public class VerifyRepositoryAction extends Action<VerifyRepositoryRequest, Veri
|
|||
public VerifyRepositoryResponse newResponse() {
|
||||
return new VerifyRepositoryResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public VerifyRepositoryRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new VerifyRepositoryRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -19,16 +19,13 @@
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.repositories.verify;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
@ -36,7 +33,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
@ -92,20 +88,6 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
|
|||
return builder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Temporary method that allows turning a {@link NodeView} into a {@link DiscoveryNode}. This representation will never be used in
|
||||
* practice, because in >= 6.4 a consumer of the response will only be able to retrieve a representation of {@link NodeView}
|
||||
* objects.
|
||||
*
|
||||
* Effectively this will be used to hold the state of the object in 6.x so there is no need to have 2 backing objects that
|
||||
* represent the state of the Response. In practice these will always be read by a consumer as a NodeView, but it eases the
|
||||
* transition to master which will not contain any representation of a {@link DiscoveryNode}.
|
||||
*/
|
||||
DiscoveryNode convertToDiscoveryNode() {
|
||||
return new DiscoveryNode(name, nodeId, "", "", "", new TransportAddress(TransportAddress.META_ADDRESS, 0),
|
||||
Collections.emptyMap(), Collections.emptySet(), Version.CURRENT);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null) {
|
||||
|
@ -125,10 +107,7 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
|
|||
}
|
||||
}
|
||||
|
||||
private List<DiscoveryNode> nodes;
|
||||
|
||||
private ClusterName clusterName;
|
||||
|
||||
private List<NodeView> nodes;
|
||||
|
||||
private static final ObjectParser<VerifyRepositoryResponse, Void> PARSER =
|
||||
new ObjectParser<>(VerifyRepositoryResponse.class.getName(), VerifyRepositoryResponse::new);
|
||||
|
@ -139,43 +118,28 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
|
|||
VerifyRepositoryResponse() {
|
||||
}
|
||||
|
||||
public VerifyRepositoryResponse(ClusterName clusterName, DiscoveryNode[] nodes) {
|
||||
this.clusterName = clusterName;
|
||||
this.nodes = Arrays.asList(nodes);
|
||||
public VerifyRepositoryResponse(DiscoveryNode[] nodes) {
|
||||
this.nodes = Arrays.stream(nodes).map(dn -> new NodeView(dn.getId(), dn.getName())).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
|
||||
this.nodes = in.readList(NodeView::new).stream().map(n -> n.convertToDiscoveryNode()).collect(Collectors.toList());
|
||||
} else {
|
||||
clusterName = new ClusterName(in);
|
||||
this.nodes = in.readList(DiscoveryNode::new);
|
||||
}
|
||||
this.nodes = in.readList(NodeView::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
|
||||
out.writeList(getNodes());
|
||||
} else {
|
||||
clusterName.writeTo(out);
|
||||
out.writeList(nodes);
|
||||
}
|
||||
out.writeList(nodes);
|
||||
}
|
||||
|
||||
public List<NodeView> getNodes() {
|
||||
return nodes.stream().map(dn -> new NodeView(dn.getId(), dn.getName())).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public ClusterName getClusterName() {
|
||||
return clusterName;
|
||||
return nodes;
|
||||
}
|
||||
|
||||
protected void setNodes(List<NodeView> nodes) {
|
||||
this.nodes = nodes.stream().map(n -> n.convertToDiscoveryNode()).collect(Collectors.toList());
|
||||
this.nodes = nodes;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -184,12 +148,8 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
|
|||
{
|
||||
builder.startObject(NODES);
|
||||
{
|
||||
for (DiscoveryNode node : nodes) {
|
||||
builder.startObject(node.getId());
|
||||
{
|
||||
builder.field(NAME, node.getName());
|
||||
}
|
||||
builder.endObject();
|
||||
for (NodeView node : nodes) {
|
||||
node.toXContent(builder, params);
|
||||
}
|
||||
}
|
||||
builder.endObject();
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.reroute;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class ClusterRerouteAction extends Action<ClusterRerouteRequest, ClusterRerouteResponse, ClusterRerouteRequestBuilder> {
|
||||
public class ClusterRerouteAction extends Action<ClusterRerouteRequest, ClusterRerouteResponse> {
|
||||
|
||||
public static final ClusterRerouteAction INSTANCE = new ClusterRerouteAction();
|
||||
public static final String NAME = "cluster:admin/reroute";
|
||||
|
@ -35,9 +34,4 @@ public class ClusterRerouteAction extends Action<ClusterRerouteRequest, ClusterR
|
|||
public ClusterRerouteResponse newResponse() {
|
||||
return new ClusterRerouteResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterRerouteRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ClusterRerouteRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,9 @@
|
|||
|
||||
package org.elasticsearch.action.admin.cluster.reroute;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.cluster.ClusterModule;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -70,7 +72,11 @@ public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXC
|
|||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
state.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
|
||||
state.writeTo(out);
|
||||
} else {
|
||||
ClusterModule.filterCustomsForPre63Clients(state).writeTo(out);
|
||||
}
|
||||
writeAcknowledged(out);
|
||||
RoutingExplanations.writeTo(explanations, out);
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.settings;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class ClusterUpdateSettingsAction extends Action<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse, ClusterUpdateSettingsRequestBuilder> {
|
||||
public class ClusterUpdateSettingsAction extends Action<ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse> {
|
||||
|
||||
public static final ClusterUpdateSettingsAction INSTANCE = new ClusterUpdateSettingsAction();
|
||||
public static final String NAME = "cluster:admin/settings/update";
|
||||
|
@ -35,9 +34,4 @@ public class ClusterUpdateSettingsAction extends Action<ClusterUpdateSettingsReq
|
|||
public ClusterUpdateSettingsResponse newResponse() {
|
||||
return new ClusterUpdateSettingsResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterUpdateSettingsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ClusterUpdateSettingsRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.shards;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class ClusterSearchShardsAction extends Action<ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> {
|
||||
public class ClusterSearchShardsAction extends Action<ClusterSearchShardsRequest, ClusterSearchShardsResponse> {
|
||||
|
||||
public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction();
|
||||
public static final String NAME = "indices:admin/shards/search_shards";
|
||||
|
@ -35,9 +34,4 @@ public class ClusterSearchShardsAction extends Action<ClusterSearchShardsRequest
|
|||
public ClusterSearchShardsResponse newResponse() {
|
||||
return new ClusterSearchShardsResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterSearchShardsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ClusterSearchShardsRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.snapshots.create;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Create snapshot action
|
||||
*/
|
||||
public class CreateSnapshotAction extends Action<CreateSnapshotRequest, CreateSnapshotResponse, CreateSnapshotRequestBuilder> {
|
||||
public class CreateSnapshotAction extends Action<CreateSnapshotRequest, CreateSnapshotResponse> {
|
||||
|
||||
public static final CreateSnapshotAction INSTANCE = new CreateSnapshotAction();
|
||||
public static final String NAME = "cluster:admin/snapshot/create";
|
||||
|
@ -38,10 +37,5 @@ public class CreateSnapshotAction extends Action<CreateSnapshotRequest, CreateSn
|
|||
public CreateSnapshotResponse newResponse() {
|
||||
return new CreateSnapshotResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public CreateSnapshotRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new CreateSnapshotRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.snapshots.delete;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Delete snapshot action
|
||||
*/
|
||||
public class DeleteSnapshotAction extends Action<DeleteSnapshotRequest, DeleteSnapshotResponse, DeleteSnapshotRequestBuilder> {
|
||||
public class DeleteSnapshotAction extends Action<DeleteSnapshotRequest, DeleteSnapshotResponse> {
|
||||
|
||||
public static final DeleteSnapshotAction INSTANCE = new DeleteSnapshotAction();
|
||||
public static final String NAME = "cluster:admin/snapshot/delete";
|
||||
|
@ -38,10 +37,5 @@ public class DeleteSnapshotAction extends Action<DeleteSnapshotRequest, DeleteSn
|
|||
public DeleteSnapshotResponse newResponse() {
|
||||
return new DeleteSnapshotResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DeleteSnapshotRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new DeleteSnapshotRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.snapshots.get;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Get snapshots action
|
||||
*/
|
||||
public class GetSnapshotsAction extends Action<GetSnapshotsRequest, GetSnapshotsResponse, GetSnapshotsRequestBuilder> {
|
||||
public class GetSnapshotsAction extends Action<GetSnapshotsRequest, GetSnapshotsResponse> {
|
||||
|
||||
public static final GetSnapshotsAction INSTANCE = new GetSnapshotsAction();
|
||||
public static final String NAME = "cluster:admin/snapshot/get";
|
||||
|
@ -38,10 +37,5 @@ public class GetSnapshotsAction extends Action<GetSnapshotsRequest, GetSnapshots
|
|||
public GetSnapshotsResponse newResponse() {
|
||||
return new GetSnapshotsResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetSnapshotsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new GetSnapshotsRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.snapshots.restore;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Restore snapshot action
|
||||
*/
|
||||
public class RestoreSnapshotAction extends Action<RestoreSnapshotRequest, RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> {
|
||||
public class RestoreSnapshotAction extends Action<RestoreSnapshotRequest, RestoreSnapshotResponse> {
|
||||
|
||||
public static final RestoreSnapshotAction INSTANCE = new RestoreSnapshotAction();
|
||||
public static final String NAME = "cluster:admin/snapshot/restore";
|
||||
|
@ -38,10 +37,5 @@ public class RestoreSnapshotAction extends Action<RestoreSnapshotRequest, Restor
|
|||
public RestoreSnapshotResponse newResponse() {
|
||||
return new RestoreSnapshotResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public RestoreSnapshotRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new RestoreSnapshotRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,12 +20,11 @@
|
|||
package org.elasticsearch.action.admin.cluster.snapshots.status;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
/**
|
||||
* Snapshots status action
|
||||
*/
|
||||
public class SnapshotsStatusAction extends Action<SnapshotsStatusRequest, SnapshotsStatusResponse, SnapshotsStatusRequestBuilder> {
|
||||
public class SnapshotsStatusAction extends Action<SnapshotsStatusRequest, SnapshotsStatusResponse> {
|
||||
|
||||
public static final SnapshotsStatusAction INSTANCE = new SnapshotsStatusAction();
|
||||
public static final String NAME = "cluster:admin/snapshot/status";
|
||||
|
@ -38,10 +37,5 @@ public class SnapshotsStatusAction extends Action<SnapshotsStatusRequest, Snapsh
|
|||
public SnapshotsStatusResponse newResponse() {
|
||||
return new SnapshotsStatusResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SnapshotsStatusRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new SnapshotsStatusRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.state;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class ClusterStateAction extends Action<ClusterStateRequest, ClusterStateResponse, ClusterStateRequestBuilder> {
|
||||
public class ClusterStateAction extends Action<ClusterStateRequest, ClusterStateResponse> {
|
||||
|
||||
public static final ClusterStateAction INSTANCE = new ClusterStateAction();
|
||||
public static final String NAME = "cluster:monitor/state";
|
||||
|
@ -35,9 +34,4 @@ public class ClusterStateAction extends Action<ClusterStateRequest, ClusterState
|
|||
public ClusterStateResponse newResponse() {
|
||||
return new ClusterStateResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterStateRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ClusterStateRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.state;
|
|||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.cluster.ClusterModule;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -94,7 +95,11 @@ public class ClusterStateResponse extends ActionResponse {
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
clusterName.writeTo(out);
|
||||
clusterState.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
|
||||
clusterState.writeTo(out);
|
||||
} else {
|
||||
ClusterModule.filterCustomsForPre63Clients(clusterState).writeTo(out);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
totalCompressedSize.writeTo(out);
|
||||
}
|
||||
|
|
|
@ -20,9 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.stats;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class ClusterStatsAction extends Action<ClusterStatsRequest, ClusterStatsResponse, ClusterStatsRequestBuilder> {
|
||||
public class ClusterStatsAction extends Action<ClusterStatsRequest, ClusterStatsResponse> {
|
||||
|
||||
public static final ClusterStatsAction INSTANCE = new ClusterStatsAction();
|
||||
public static final String NAME = "cluster:monitor/stats";
|
||||
|
@ -35,9 +34,4 @@ public class ClusterStatsAction extends Action<ClusterStatsRequest, ClusterStats
|
|||
public ClusterStatsResponse newResponse() {
|
||||
return new ClusterStatsResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ClusterStatsRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new ClusterStatsRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,10 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.storedscripts;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class DeleteStoredScriptAction extends Action<DeleteStoredScriptRequest, DeleteStoredScriptResponse,
|
||||
DeleteStoredScriptRequestBuilder> {
|
||||
public class DeleteStoredScriptAction extends Action<DeleteStoredScriptRequest, DeleteStoredScriptResponse> {
|
||||
|
||||
public static final DeleteStoredScriptAction INSTANCE = new DeleteStoredScriptAction();
|
||||
public static final String NAME = "cluster:admin/script/delete";
|
||||
|
@ -36,9 +34,4 @@ public class DeleteStoredScriptAction extends Action<DeleteStoredScriptRequest,
|
|||
public DeleteStoredScriptResponse newResponse() {
|
||||
return new DeleteStoredScriptResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DeleteStoredScriptRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new DeleteStoredScriptRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,10 +20,8 @@
|
|||
package org.elasticsearch.action.admin.cluster.storedscripts;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public class GetStoredScriptAction extends Action<GetStoredScriptRequest, GetStoredScriptResponse,
|
||||
GetStoredScriptRequestBuilder> {
|
||||
public class GetStoredScriptAction extends Action<GetStoredScriptRequest, GetStoredScriptResponse> {
|
||||
|
||||
public static final GetStoredScriptAction INSTANCE = new GetStoredScriptAction();
|
||||
public static final String NAME = "cluster:admin/script/get";
|
||||
|
@ -36,10 +34,4 @@ public class GetStoredScriptAction extends Action<GetStoredScriptRequest, GetSto
|
|||
public GetStoredScriptResponse newResponse() {
|
||||
return new GetStoredScriptResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public GetStoredScriptRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new GetStoredScriptRequestBuilder(client, this);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -23,8 +23,7 @@ import org.elasticsearch.action.Action;
|
|||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
|
||||
public class PutStoredScriptAction extends Action<PutStoredScriptRequest, PutStoredScriptResponse,
|
||||
PutStoredScriptRequestBuilder> {
|
||||
public class PutStoredScriptAction extends Action<PutStoredScriptRequest, PutStoredScriptResponse> {
|
||||
|
||||
public static final PutStoredScriptAction INSTANCE = new PutStoredScriptAction();
|
||||
public static final String NAME = "cluster:admin/script/put";
|
||||
|
@ -39,9 +38,4 @@ public class PutStoredScriptAction extends Action<PutStoredScriptRequest, PutSto
|
|||
public PutStoredScriptResponse newResponse() {
|
||||
return new PutStoredScriptResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public PutStoredScriptRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new PutStoredScriptRequestBuilder(client, this);
|
||||
}
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue