Merge remote-tracking branch 'elastic/master' into zen2

commit 42457b5960
Yannick Welsch, 2018-12-05 11:39:38 +01:00
34 changed files with 1259 additions and 222 deletions

View File

@@ -58,10 +58,10 @@ import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
-import org.elasticsearch.client.core.TermVectorsResponse;
-import org.elasticsearch.client.core.TermVectorsRequest;
import org.elasticsearch.client.core.MultiTermVectorsRequest;
import org.elasticsearch.client.core.MultiTermVectorsResponse;
+import org.elasticsearch.client.core.TermVectorsRequest;
+import org.elasticsearch.client.core.TermVectorsResponse;
import org.elasticsearch.client.tasks.TaskSubmissionResponse;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.CheckedFunction;
@@ -139,6 +139,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles;
import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ParsedAvg;
import org.elasticsearch.search.aggregations.metrics.ParsedCardinality;
@@ -148,6 +149,7 @@ import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid;
import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks;
import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles;
import org.elasticsearch.search.aggregations.metrics.ParsedMax;
+import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation;
import org.elasticsearch.search.aggregations.metrics.ParsedMin;
import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric;
import org.elasticsearch.search.aggregations.metrics.ParsedStats;
@@ -161,20 +163,18 @@ import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder;
-import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation;
-import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
-import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue;
-import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue;
-import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue;
-import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket;
-import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder;
-import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket;
-import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder;
-import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder;
-import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket;
import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue;
+import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue;
import org.elasticsearch.search.aggregations.pipeline.ParsedDerivative;
+import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket;
+import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket;
+import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue;
+import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket;
+import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder;
+import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder;
@@ -203,13 +203,33 @@ import static java.util.Collections.singleton;
import static java.util.stream.Collectors.toList;
/**
-* High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses.
-* The {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically
-* when closing the {@link RestHighLevelClient} instance that wraps it.
+* High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. The
+* {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically when
+* closing the {@link RestHighLevelClient} instance that wraps it.
+* <p>
+*
* In case an already existing instance of a low-level REST client needs to be provided, this class can be subclassed and the
* {@link #RestHighLevelClient(RestClient, CheckedConsumer, List)} constructor can be used.
-* This class can also be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through
-* plugins, or to add support for custom response sections, again added to Elasticsearch through plugins.
+* <p>
+*
+* This class can also be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through plugins,
+* or to add support for custom response sections, again added to Elasticsearch through plugins.
+* <p>
+*
+* The majority of the methods in this class come in two flavors, a blocking and an asynchronous version (e.g.
+* {@link #search(SearchRequest, RequestOptions)} and {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)}, where the later
+* takes an implementation of an {@link ActionListener} as an argument that needs to implement methods that handle successful responses and
+* failure scenarios. Most of the blocking calls can throw an {@link IOException} or an unchecked {@link ElasticsearchException} in the
+* following cases:
+*
+* <ul>
+* <li>an {@link IOException} is usually thrown in case of failing to parse the REST response in the high-level REST client, the request
+* times out or similar cases where there is no response coming back from the Elasticsearch server</li>
+* <li>an {@link ElasticsearchException} is usually thrown in case where the server returns a 4xx or 5xx error code. The high-level client
+* then tries to parse the response body error details into a generic ElasticsearchException and suppresses the original
+* {@link ResponseException}</li>
+* </ul>
+*
*/
public class RestHighLevelClient implements Closeable {
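The new javadoc describes the blocking/asynchronous pairing in the abstract. A minimal usage sketch, not part of this commit, could look like the following; it assumes an existing `RestHighLevelClient` named `client`, and the index name `posts` is purely illustrative. The `search`/`searchAsync` pair stands in for any of the two-flavor methods.

[source,java]
--------------------------------------------------
// Sketch only: `client` is an already-built RestHighLevelClient, "posts" is a made-up index.
SearchRequest request = new SearchRequest("posts");

// Blocking flavor: returns the response or throws IOException / ElasticsearchException.
SearchResponse response = client.search(request, RequestOptions.DEFAULT);

// Asynchronous flavor: the ActionListener receives either the response or the failure.
client.searchAsync(request, RequestOptions.DEFAULT, new ActionListener<SearchResponse>() {
    @Override
    public void onResponse(SearchResponse searchResponse) {
        // handle a successful response
    }

    @Override
    public void onFailure(Exception e) {
        // handle transport-level or response-parsing failures
    }
});
--------------------------------------------------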
@@ -448,7 +468,6 @@ public class RestHighLevelClient implements Closeable {
* @param bulkRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final BulkResponse bulk(BulkRequest bulkRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet());
@@ -471,7 +490,6 @@ public class RestHighLevelClient implements Closeable {
* @param reindexRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final BulkByScrollResponse reindex(ReindexRequest reindexRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(
@@ -485,7 +503,6 @@ public class RestHighLevelClient implements Closeable {
* @param reindexRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the submission response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final TaskSubmissionResponse submitReindexTask(ReindexRequest reindexRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(
@@ -513,7 +530,6 @@ public class RestHighLevelClient implements Closeable {
* @param updateByQueryRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(
@@ -543,7 +559,6 @@ public class RestHighLevelClient implements Closeable {
* @param deleteByQueryRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final BulkByScrollResponse deleteByQuery(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(
@@ -573,7 +588,6 @@ public class RestHighLevelClient implements Closeable {
* @param rethrottleRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final ListTasksResponse deleteByQueryRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleDeleteByQuery, options,
@@ -601,7 +615,6 @@ public class RestHighLevelClient implements Closeable {
* @param rethrottleRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final ListTasksResponse updateByQueryRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleUpdateByQuery, options,
@@ -630,7 +643,6 @@ public class RestHighLevelClient implements Closeable {
* @param rethrottleRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final ListTasksResponse reindexRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleReindex, options,
@@ -656,7 +668,6 @@ public class RestHighLevelClient implements Closeable {
* Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return <code>true</code> if the ping succeeded, false otherwise
-* @throws IOException in case there is a problem sending the request
*/
public final boolean ping(RequestOptions options) throws IOException {
return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), options, RestHighLevelClient::convertExistsResponse,
@@ -667,7 +678,6 @@ public class RestHighLevelClient implements Closeable {
* Get the cluster info otherwise provided when sending an HTTP request to '/'
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final MainResponse info(RequestOptions options) throws IOException {
return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(), options,
@@ -680,7 +690,6 @@ public class RestHighLevelClient implements Closeable {
* @param getRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final GetResponse get(GetRequest getRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, singleton(404));
@@ -704,7 +713,6 @@ public class RestHighLevelClient implements Closeable {
* @param multiGetRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
* @deprecated use {@link #mget(MultiGetRequest, RequestOptions)} instead
*/
@Deprecated
@@ -719,7 +727,6 @@ public class RestHighLevelClient implements Closeable {
* @param multiGetRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final MultiGetResponse mget(MultiGetRequest multiGetRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, options, MultiGetResponse::fromXContent,
@@ -757,7 +764,6 @@ public class RestHighLevelClient implements Closeable {
* @param getRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return <code>true</code> if the document exists, <code>false</code> otherwise
-* @throws IOException in case there is a problem sending the request
*/
public final boolean exists(GetRequest getRequest, RequestOptions options) throws IOException {
return performRequest(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, emptySet());
@@ -777,20 +783,19 @@ public class RestHighLevelClient implements Closeable {
/**
* Checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html#_source">Source exists API
* on elastic.co</a>
* @param getRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return <code>true</code> if the document and _source field exists, <code>false</code> otherwise
-* @throws IOException in case there is a problem sending the request
*/
public boolean existsSource(GetRequest getRequest, RequestOptions options) throws IOException {
return performRequest(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, emptySet());
}
/**
* Asynchronously checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html#_source">Source exists API
* on elastic.co</a>
* @param getRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
@@ -799,15 +804,14 @@ public class RestHighLevelClient implements Closeable {
public final void existsSourceAsync(GetRequest getRequest, RequestOptions options, ActionListener<Boolean> listener) {
performRequestAsync(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, listener,
emptySet());
}
/**
* Index a document using the Index API.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
* @param indexRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final IndexResponse index(IndexRequest indexRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet());
@@ -831,7 +835,6 @@ public class RestHighLevelClient implements Closeable {
* @param countRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final CountResponse count(CountRequest countRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(countRequest, RequestConverters::count, options, CountResponse::fromXContent,
@@ -856,7 +859,6 @@ public class RestHighLevelClient implements Closeable {
* @param updateRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final UpdateResponse update(UpdateRequest updateRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, emptySet());
@@ -877,10 +879,9 @@ public class RestHighLevelClient implements Closeable {
/**
* Deletes a document by id using the Delete API.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
-* @param deleteRequest the reuqest
+* @param deleteRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final DeleteResponse delete(DeleteRequest deleteRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, options, DeleteResponse::fromXContent,
@@ -905,7 +906,6 @@ public class RestHighLevelClient implements Closeable {
* @param searchRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final SearchResponse search(SearchRequest searchRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(searchRequest, RequestConverters::search, options, SearchResponse::fromXContent, emptySet());
@@ -930,7 +930,6 @@ public class RestHighLevelClient implements Closeable {
* @param multiSearchRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
* @deprecated use {@link #msearch(MultiSearchRequest, RequestOptions)} instead
*/
@Deprecated
@@ -945,7 +944,6 @@ public class RestHighLevelClient implements Closeable {
* @param multiSearchRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final MultiSearchResponse msearch(MultiSearchRequest multiSearchRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, options, MultiSearchResponse::fromXContext,
@@ -988,7 +986,6 @@ public class RestHighLevelClient implements Closeable {
* @param searchScrollRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
* @deprecated use {@link #scroll(SearchScrollRequest, RequestOptions)} instead
*/
@Deprecated
@@ -1003,7 +1000,6 @@ public class RestHighLevelClient implements Closeable {
* @param searchScrollRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, options, SearchResponse::fromXContent,
@@ -1046,7 +1042,6 @@ public class RestHighLevelClient implements Closeable {
* @param clearScrollRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, options, ClearScrollResponse::fromXContent,
@@ -1057,7 +1052,7 @@ public class RestHighLevelClient implements Closeable {
* Asynchronously clears one or more scroll ids using the Clear Scroll API.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#_clear_scroll_api">
* Clear Scroll API on elastic.co</a>
-* @param clearScrollRequest the reuqest
+* @param clearScrollRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
*/
@@ -1074,7 +1069,6 @@ public class RestHighLevelClient implements Closeable {
* @param searchTemplateRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final SearchTemplateResponse searchTemplate(SearchTemplateRequest searchTemplateRequest,
RequestOptions options) throws IOException {
@@ -1100,7 +1094,6 @@ public class RestHighLevelClient implements Closeable {
* @param explainRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final ExplainResponse explain(ExplainRequest explainRequest, RequestOptions options) throws IOException {
return performRequest(explainRequest, RequestConverters::explain, options,
@@ -1198,7 +1191,6 @@ public class RestHighLevelClient implements Closeable {
* @param rankEvalRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, RequestOptions options) throws IOException {
return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, options, RankEvalResponse::fromXContent,
@@ -1251,7 +1243,6 @@ public class RestHighLevelClient implements Closeable {
* @param fieldCapabilitiesRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest,
RequestOptions options) throws IOException {
@@ -1266,7 +1257,6 @@ public class RestHighLevelClient implements Closeable {
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public GetStoredScriptResponse getScript(GetStoredScriptRequest request, RequestOptions options) throws IOException {
return performRequestAndParseEntity(request, RequestConverters::getScript, options,
@@ -1294,7 +1284,6 @@ public class RestHighLevelClient implements Closeable {
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse deleteScript(DeleteStoredScriptRequest request, RequestOptions options) throws IOException {
return performRequestAndParseEntity(request, RequestConverters::deleteScript, options,
@@ -1322,7 +1311,6 @@ public class RestHighLevelClient implements Closeable {
* @param putStoredScriptRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
-* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse putScript(PutStoredScriptRequest putStoredScriptRequest,
RequestOptions options) throws IOException {
@@ -1450,9 +1438,9 @@ public class RestHighLevelClient implements Closeable {
throw new IOException("Unable to parse response body for " + response, e);
}
}
/**
* Defines a helper method for requests that can 404 and in which case will return an empty Optional
* otherwise tries to parse the response body
*/
protected final <Req extends Validatable, Resp> Optional<Resp> performRequestAndParseOptionalEntity(Req request,
@@ -1481,7 +1469,7 @@ public class RestHighLevelClient implements Closeable {
} catch (Exception e) {
throw new IOException("Unable to parse response body for " + response, e);
}
}
/**
* @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation
@@ -1603,9 +1591,9 @@ public class RestHighLevelClient implements Closeable {
}
};
}
/**
-* Async request which returns empty Optionals in the case of 404s or parses entity into an Optional
+* Asynchronous request which returns empty {@link Optional}s in the case of 404s or parses entity into an Optional
*/
protected final <Req extends Validatable, Resp> void performRequestAsyncAndParseOptionalEntity(Req request,
CheckedFunction<Req, Request, IOException> requestConverter,
@@ -1625,11 +1613,11 @@ public class RestHighLevelClient implements Closeable {
return;
}
req.setOptions(options);
ResponseListener responseListener = wrapResponseListener404sOptional(response -> parseEntity(response.getEntity(),
entityParser), listener);
client.performRequestAsync(req, responseListener);
}
final <Resp> ResponseListener wrapResponseListener404sOptional(CheckedFunction<Response, Resp, IOException> responseConverter,
ActionListener<Optional<Resp>> actionListener) {
return new ResponseListener() {
@@ -1658,7 +1646,7 @@ public class RestHighLevelClient implements Closeable {
}
}
};
}
/**
* Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}.

View File

@@ -19,41 +19,59 @@
package org.elasticsearch.client.ccr;
+import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentParser.Token;
-import java.io.IOException;
+import java.util.AbstractMap;
import java.util.Collections;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.NavigableMap;
import java.util.Objects;
+import java.util.TreeMap;
+import java.util.stream.Collectors;
public final class GetAutoFollowPatternResponse {
-public static GetAutoFollowPatternResponse fromXContent(final XContentParser parser) throws IOException {
-final Map<String, Pattern> patterns = new HashMap<>();
-for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
-if (token == Token.FIELD_NAME) {
-final String name = parser.currentName();
-final Pattern pattern = Pattern.PARSER.parse(parser, null);
-patterns.put(name, pattern);
-}
-}
-return new GetAutoFollowPatternResponse(patterns);
-}
-private final Map<String, Pattern> patterns;
-GetAutoFollowPatternResponse(Map<String, Pattern> patterns) {
-this.patterns = Collections.unmodifiableMap(patterns);
-}
-public Map<String, Pattern> getPatterns() {
+static final ParseField PATTERNS_FIELD = new ParseField("patterns");
+static final ParseField NAME_FIELD = new ParseField("name");
+static final ParseField PATTERN_FIELD = new ParseField("pattern");
+private static final ConstructingObjectParser<Map.Entry<String, Pattern>, Void> ENTRY_PARSER = new ConstructingObjectParser<>(
+"get_auto_follow_pattern_response", args -> new AbstractMap.SimpleEntry<>((String) args[0], (Pattern) args[1]));
+static {
+ENTRY_PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD);
+ENTRY_PARSER.declareObject(ConstructingObjectParser.constructorArg(), Pattern.PARSER, PATTERN_FIELD);
+}
+private static final ConstructingObjectParser<GetAutoFollowPatternResponse, Void> PARSER = new ConstructingObjectParser<>(
+"get_auto_follow_pattern_response", args -> {
+@SuppressWarnings("unchecked")
+List<Map.Entry<String, Pattern>> entries = (List<Map.Entry<String, Pattern>>) args[0];
+return new GetAutoFollowPatternResponse(new TreeMap<>(entries.stream()
+.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))));
+});
+static {
+PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), ENTRY_PARSER, PATTERNS_FIELD);
+}
+public static GetAutoFollowPatternResponse fromXContent(final XContentParser parser) {
+return PARSER.apply(parser, null);
+}
+private final NavigableMap<String, Pattern> patterns;
+GetAutoFollowPatternResponse(NavigableMap<String, Pattern> patterns) {
+this.patterns = Collections.unmodifiableNavigableMap(patterns);
+}
+public NavigableMap<String, Pattern> getPatterns() {
return patterns;
}
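As a quick illustration of the reworked response type, a consumer-side sketch (not part of this commit) might look like the following. It assumes a `GetAutoFollowPatternResponse` named `response` obtained from the client elsewhere; only getters that appear in this diff are used.

[source,java]
--------------------------------------------------
// Sketch only: `response` is assumed to come from the CCR client.
for (Map.Entry<String, GetAutoFollowPatternResponse.Pattern> entry
        : response.getPatterns().entrySet()) {              // NavigableMap, sorted by pattern name
    GetAutoFollowPatternResponse.Pattern pattern = entry.getValue();
    System.out.println(entry.getKey()                        // auto-follow pattern name
        + " -> " + pattern.getRemoteCluster()                // remote cluster alias
        + " " + pattern.getLeaderIndexPatterns()             // leader index patterns
        + " " + pattern.getFollowIndexNamePattern());        // may be null
}
--------------------------------------------------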

View File

@@ -27,8 +27,9 @@ import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.Collections;
-import java.util.HashMap;
import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD;
import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD;
@@ -48,7 +49,7 @@ public class GetAutoFollowPatternResponseTests extends ESTestCase {
private GetAutoFollowPatternResponse createTestInstance() {
int numPatterns = randomIntBetween(0, 16);
-Map<String, GetAutoFollowPatternResponse.Pattern> patterns = new HashMap<>(numPatterns);
+NavigableMap<String, GetAutoFollowPatternResponse.Pattern> patterns = new TreeMap<>();
for (int i = 0; i < numPatterns; i++) {
GetAutoFollowPatternResponse.Pattern pattern = new GetAutoFollowPatternResponse.Pattern(
randomAlphaOfLength(4), Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4));
@@ -90,17 +91,26 @@ public class GetAutoFollowPatternResponseTests extends ESTestCase {
public static void toXContent(GetAutoFollowPatternResponse response, XContentBuilder builder) throws IOException {
builder.startObject();
{
+builder.startArray(GetAutoFollowPatternResponse.PATTERNS_FIELD.getPreferredName());
for (Map.Entry<String, GetAutoFollowPatternResponse.Pattern> entry : response.getPatterns().entrySet()) {
-builder.startObject(entry.getKey());
-GetAutoFollowPatternResponse.Pattern pattern = entry.getValue();
-builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster());
-builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns());
-if (pattern.getFollowIndexNamePattern()!= null) {
-builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern());
-}
-entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS);
-builder.endObject();
+builder.startObject();
+{
+builder.field(GetAutoFollowPatternResponse.NAME_FIELD.getPreferredName(), entry.getKey());
+builder.startObject(GetAutoFollowPatternResponse.PATTERN_FIELD.getPreferredName());
+{
+GetAutoFollowPatternResponse.Pattern pattern = entry.getValue();
+builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster());
+builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns());
+if (pattern.getFollowIndexNamePattern()!= null) {
+builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern());
+}
+entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS);
+}
+builder.endObject();
+}
+builder.endObject();
}
+builder.endArray();
}
builder.endObject();
}

View File

@@ -18,6 +18,15 @@ for the +{response}+ to be returned before continuing with code execution:
include-tagged::{doc-tests-file}[{api}-execute]
--------------------------------------------------
+Synchronous calls may throw an `IOException` in case of either failing to
+parse the REST response in the high-level REST client, the request times out
+or similar cases where there is no response coming back from the server.
+In cases where the server returns a `4xx` or `5xx` error code, the high-level
+client tries to parse the response body error details instead and then throws
+a generic `ElasticsearchException` and adds the original `ResponseException` as a
+suppressed exception to it.
[id="{upid}-{api}-async"]
==== Asynchronous Execution
@@ -36,7 +45,8 @@ the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
-it failed.
+it failed. Failure scenarios and expected exceptions are the same as in the
+synchronous execution case.
A typical listener for +{api}+ looks like:
@@ -45,4 +55,4 @@ A typical listener for +{api}+ looks like:
include-tagged::{doc-tests-file}[{api}-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed.
<2> Called when the whole +{request}+ fails.

View File

@@ -87,15 +87,19 @@ The API returns the following result:
[source,js]
--------------------------------------------------
{
-  "my_auto_follow_pattern" :
-  {
-    "remote_cluster" : "remote_cluster",
-    "leader_index_patterns" :
-    [
-      "leader_index*"
-    ],
-    "follow_index_pattern" : "{{leader_index}}-follower"
-  }
+  "patterns": [
+    {
+      "name": "my_auto_follow_pattern",
+      "pattern": {
+        "remote_cluster" : "remote_cluster",
+        "leader_index_patterns" :
+        [
+          "leader_index*"
+        ],
+        "follow_index_pattern" : "{{leader_index}}-follower"
+      }
+    }
+  ]
}
--------------------------------------------------
// TESTRESPONSE

View File

@ -1,30 +1,31 @@
[role="xpack"] [role="xpack"]
[testenv="gold"]
[[security-files]] [[security-files]]
=== Security Files === Security files
{security} uses the following files: The {es} {security-features} use the following files:
* `ES_PATH_CONF/roles.yml` defines the roles in use on the cluster * `ES_PATH_CONF/roles.yml` defines the roles in use on the cluster. See
(read more <<roles-management-file, here>>). {stack-ov}/defining-roles.html[Defining roles].
* `ES_PATH_CONF/elasticsearch-users` defines the users and their hashed passwords for * `ES_PATH_CONF/elasticsearch-users` defines the users and their hashed passwords for
the <<file-realm,`file` realm>>. the `file` realm. See <<configuring-file-realm>>.
* `ES_PATH_CONF/elasticsearch-users_roles` defines the user roles assignment for the * `ES_PATH_CONF/elasticsearch-users_roles` defines the user roles assignment for the
the <<file-realm, `file` realm>>. the `file` realm. See <<configuring-file-realm>>.
* `ES_PATH_CONF/role_mapping.yml` defines the role assignments for a * `ES_PATH_CONF/role_mapping.yml` defines the role assignments for a
Distinguished Name (DN) to a role. This allows for LDAP and Active Directory Distinguished Name (DN) to a role. This allows for LDAP and Active Directory
groups and users and PKI users to be mapped to roles (read more groups and users and PKI users to be mapped to roles. See
<<mapping-roles, here>>). {stack-ov}/mapping-roles.html[Mapping users and groups to roles].
* `ES_PATH_CONF/log4j2.properties` contains audit information (read more * `ES_PATH_CONF/log4j2.properties` contains audit information. See
<<logging-file, here>>). {stack-ov}/audit-log-output.html[Logfile audit output].
[[security-files-location]] [[security-files-location]]
IMPORTANT: Any files that {security} uses must be stored in the Elasticsearch IMPORTANT: Any files that the {security-features} use must be stored in the {es}
configuration directory. Elasticsearch runs with restricted permissions configuration directory. {es} runs with restricted permissions
and is only permitted to read from the locations configured in the and is only permitted to read from the locations configured in the
directory layout for enhanced security. directory layout for enhanced security.

View File

@ -5,8 +5,9 @@
<titleabbrev>Security settings</titleabbrev> <titleabbrev>Security settings</titleabbrev>
++++ ++++
By default, {security} is disabled when you have a basic or trial license. To By default, the {es} {security-features} are disabled when you have a basic or
enable {security}, use the `xpack.security.enabled` setting. trial license. To enable {security-features}, use the `xpack.security.enabled`
setting.
You configure `xpack.security` settings to You configure `xpack.security` settings to
<<anonymous-access-settings, enable anonymous access>> <<anonymous-access-settings, enable anonymous access>>
@ -25,13 +26,15 @@ For more information about creating and updating the {es} keystore, see
[[general-security-settings]] [[general-security-settings]]
==== General security settings ==== General security settings
`xpack.security.enabled`:: `xpack.security.enabled`::
Set to `true` to enable {security} on the node. + Set to `true` to enable {es} {security-features} on the node. +
+ +
-- --
If set to `false`, which is the default value for basic and trial licenses, If set to `false`, which is the default value for basic and trial licenses,
{security} is disabled. It also affects all {kib} instances that connect to this {security-features} are disabled. It also affects all {kib} instances that
{es} instance; you do not need to disable {security} in those `kibana.yml` files. connect to this {es} instance; you do not need to disable {security-features} in
For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} security settings]. those `kibana.yml` files. For more information about disabling {security-features}
in specific {kib} instances, see
{kibana-ref}/security-settings-kb.html[{kib} security settings].
TIP: If you have gold or higher licenses, the default value is `true`; we TIP: If you have gold or higher licenses, the default value is `true`; we
recommend that you explicitly add this setting to avoid confusion. recommend that you explicitly add this setting to avoid confusion.
@ -67,7 +70,7 @@ See <<password-hashing-algorithms>>. Defaults to `bcrypt`.
[[anonymous-access-settings]] [[anonymous-access-settings]]
==== Anonymous access settings ==== Anonymous access settings
You can configure the following anonymous access settings in You can configure the following anonymous access settings in
`elasticsearch.yml`. For more information, see {xpack-ref}/anonymous-access.html[ `elasticsearch.yml`. For more information, see {stack-ov}/anonymous-access.html[
Enabling anonymous access]. Enabling anonymous access].
`xpack.security.authc.anonymous.username`:: `xpack.security.authc.anonymous.username`::
@ -117,7 +120,7 @@ Defaults to `48h` (48 hours).
You can set the following document and field level security You can set the following document and field level security
settings in `elasticsearch.yml`. For more information, see settings in `elasticsearch.yml`. For more information, see
{xpack-ref}/field-and-document-access-control.html[Setting up document and field {stack-ov}/field-and-document-access-control.html[Setting up document and field
level security]. level security].
`xpack.security.dls_fls.enabled`:: `xpack.security.dls_fls.enabled`::
@ -165,7 +168,7 @@ xpack.security.authc.realms:
---------------------------------------- ----------------------------------------
The valid settings vary depending on the realm type. For more The valid settings vary depending on the realm type. For more
information, see {xpack-ref}/setting-up-authentication.html[Setting up authentication]. information, see {stack-ov}/setting-up-authentication.html[Setting up authentication].
[float] [float]
[[ref-realm-settings]] [[ref-realm-settings]]
@ -204,7 +207,7 @@ Defaults to `ssha256`.
`authentication.enabled`:: If set to `false`, disables authentication support in `authentication.enabled`:: If set to `false`, disables authentication support in
this realm, so that it only supports user lookups. this realm, so that it only supports user lookups.
(See the {xpack-ref}/run-as-privilege.html[run as] and (See the {stack-ov}/run-as-privilege.html[run as] and
{stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). {stack-ov}/realm-chains.html#authorization_realms[authorization realms] features).
Defaults to `true`. Defaults to `true`.
@ -233,7 +236,7 @@ user credentials. See <<cache-hash-algo>>. Defaults to `ssha256`.
`authentication.enabled`:: If set to `false`, disables authentication support in `authentication.enabled`:: If set to `false`, disables authentication support in
this realm, so that it only supports user lookups. this realm, so that it only supports user lookups.
(See the {xpack-ref}/run-as-privilege.html[run as] and (See the {stack-ov}/run-as-privilege.html[run as] and
{stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). {stack-ov}/realm-chains.html#authorization_realms[authorization realms] features).
Defaults to `true`. Defaults to `true`.
@ -282,7 +285,7 @@ The DN template that replaces the user name with the string `{0}`.
This setting is multivalued; you can specify multiple user contexts. This setting is multivalued; you can specify multiple user contexts.
Required to operate in user template mode. If `user_search.base_dn` is specified, Required to operate in user template mode. If `user_search.base_dn` is specified,
this setting is not valid. For more information on this setting is not valid. For more information on
the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms]. the different modes, see {stack-ov}/ldap-realm.html[LDAP realms].
`authorization_realms`:: `authorization_realms`::
The names of the realms that should be consulted for delegated authorization. The names of the realms that should be consulted for delegated authorization.
@ -306,7 +309,7 @@ to `memberOf`.
Specifies a container DN to search for users. Required Specifies a container DN to search for users. Required
to operate in user search mode. If `user_dn_templates` is specified, this to operate in user search mode. If `user_dn_templates` is specified, this
setting is not valid. For more information on setting is not valid. For more information on
the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms]. the different modes, see {stack-ov}/ldap-realm.html[LDAP realms].
`user_search.scope`:: `user_search.scope`::
The scope of the user search. Valid values are `sub_tree`, `one_level` or The scope of the user search. Valid values are `sub_tree`, `one_level` or
@ -379,11 +382,11 @@ the filter. If not set, the user DN is passed into the filter. Defaults to Empt
If set to `true`, the names of any unmapped LDAP groups are used as role names If set to `true`, the names of any unmapped LDAP groups are used as role names
and assigned to the user. A group is considered to be _unmapped_ if it is not and assigned to the user. A group is considered to be _unmapped_ if it is not
referenced in a referenced in a
{xpack-ref}/mapping-roles.html#mapping-roles-file[role-mapping file]. API-based {stack-ov}/mapping-roles.html#mapping-roles-file[role-mapping file]. API-based
role mappings are not considered. Defaults to `false`. role mappings are not considered. Defaults to `false`.
`files.role_mapping`:: `files.role_mapping`::
The {xpack-ref}/security-files.html[location] for the {xpack-ref}/mapping-roles.html#mapping-roles[ The <<security-files,location>> for the {stack-ov}/mapping-roles.html#mapping-roles[
YAML role mapping configuration file]. Defaults to YAML role mapping configuration file]. Defaults to
`ES_PATH_CONF/role_mapping.yml`. `ES_PATH_CONF/role_mapping.yml`.
@ -501,7 +504,7 @@ in-memory cached user credentials. See <<cache-hash-algo>>. Defaults to `ssha256
`authentication.enabled`:: If set to `false`, disables authentication support in `authentication.enabled`:: If set to `false`, disables authentication support in
this realm, so that it only supports user lookups. this realm, so that it only supports user lookups.
(See the {xpack-ref}/run-as-privilege.html[run as] and (See the {stack-ov}/run-as-privilege.html[run as] and
{stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). {stack-ov}/realm-chains.html#authorization_realms[authorization realms] features).
Defaults to `true`. Defaults to `true`.
@ -557,7 +560,7 @@ is not referenced in any role-mapping files. API-based role mappings are not
considered. Defaults to `false`. considered. Defaults to `false`.
`files.role_mapping`:: `files.role_mapping`::
The {xpack-ref}/security-files.html[location] for the YAML The <<security-files,location>> for the YAML
role mapping configuration file. Defaults to `ES_PATH_CONF/role_mapping.yml`. role mapping configuration file. Defaults to `ES_PATH_CONF/role_mapping.yml`.
`user_search.base_dn`:: `user_search.base_dn`::
@ -748,7 +751,7 @@ the in-memory cached user credentials. See <<cache-hash-algo>>. Defaults to `ssh
`authentication.enabled`:: If set to `false`, disables authentication support in `authentication.enabled`:: If set to `false`, disables authentication support in
this realm, so that it only supports user lookups. this realm, so that it only supports user lookups.
(See the {xpack-ref}/run-as-privilege.html[run as] and (See the {stack-ov}/run-as-privilege.html[run as] and
{stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). {stack-ov}/realm-chains.html#authorization_realms[authorization realms] features).
Defaults to `true`. Defaults to `true`.
@ -789,8 +792,8 @@ The path of a truststore to use. Defaults to the trusted certificates configured
for SSL. This setting cannot be used with `certificate_authorities`. for SSL. This setting cannot be used with `certificate_authorities`.
`files.role_mapping`:: `files.role_mapping`::
Specifies the {xpack-ref}/security-files.html[location] of the Specifies the <<security-files,location>> of the
{xpack-ref}/mapping-roles.html[YAML role mapping configuration file]. {stack-ov}/mapping-roles.html[YAML role mapping configuration file].
Defaults to `ES_PATH_CONF/role_mapping.yml`. Defaults to `ES_PATH_CONF/role_mapping.yml`.
`authorization_realms`:: `authorization_realms`::
@ -1207,7 +1210,7 @@ through the list of URLs will continue until a successful connection is made.
==== Default TLS/SSL settings ==== Default TLS/SSL settings
You can configure the following TLS/SSL settings in You can configure the following TLS/SSL settings in
`elasticsearch.yml`. For more information, see `elasticsearch.yml`. For more information, see
{xpack-ref}/encrypting-communications.html[Encrypting communications]. These settings will be used {stack-ov}/encrypting-communications.html[Encrypting communications]. These settings will be used
for all of {xpack} unless they have been overridden by more specific for all of {xpack} unless they have been overridden by more specific
settings such as those for HTTP or Transport. settings such as those for HTTP or Transport.
@ -1447,7 +1450,7 @@ See also <<remote-audit-settings>>.
[float] [float]
[[ip-filtering-settings]] [[ip-filtering-settings]]
==== IP filtering settings ==== IP filtering settings
You can configure the following settings for {xpack-ref}/ip-filtering.html[IP filtering]. You can configure the following settings for {stack-ov}/ip-filtering.html[IP filtering].
`xpack.security.transport.filter.allow`:: `xpack.security.transport.filter.allow`::
List of IP addresses to allow. List of IP addresses to allow.

View File

@ -102,7 +102,9 @@ public class MultiTermVectorsRequest extends ActionRequest
throw new IllegalArgumentException("docs array element should include an object"); throw new IllegalArgumentException("docs array element should include an object");
} }
TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template); TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template);
termVectorsRequest.type(MapperService.SINGLE_MAPPING_NAME); if (termVectorsRequest.type() == null) {
termVectorsRequest.type(MapperService.SINGLE_MAPPING_NAME);
}
TermVectorsRequest.parseRequest(termVectorsRequest, parser); TermVectorsRequest.parseRequest(termVectorsRequest, parser);
add(termVectorsRequest); add(termVectorsRequest);
} }

View File

@ -42,4 +42,17 @@ public interface RepositoryPlugin {
default Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { default Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
return Collections.emptyMap(); return Collections.emptyMap();
} }
/**
* Returns internal repository types added by this plugin. Internal repositories cannot be registered
* through the external API.
*
* @param env The environment for the local node, which may be used for the local settings and path.repo
*
* The key of the returned {@link Map} is the type name of the repository and
* the value is a factory used to construct a {@link Repository} implementation of that type.
*/
default Map<String, Repository.Factory> getInternalRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
return Collections.emptyMap();
}
} }
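As a sketch of how a plugin might use this hook (the `my-internal` type name and
the `MyInternalRepository` class are hypothetical and introduced only for
illustration; the CCR plugin further down in this change does the same thing for
its `CcrRepository` type):

public class MyRepositoryPlugin extends Plugin implements RepositoryPlugin {
    @Override
    public Map<String, Repository.Factory> getInternalRepositories(Environment env,
                                                                    NamedXContentRegistry namedXContentRegistry) {
        // this type can only be instantiated internally, never through the put-repository API
        return Collections.singletonMap("my-internal",
            metadata -> new MyInternalRepository(metadata, env.settings()));
    }
}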

View File

@ -58,8 +58,24 @@ public class RepositoriesModule extends AbstractModule {
} }
} }
Map<String, Repository.Factory> internalFactories = new HashMap<>();
for (RepositoryPlugin repoPlugin : repoPlugins) {
Map<String, Repository.Factory> newRepoTypes = repoPlugin.getInternalRepositories(env, namedXContentRegistry);
for (Map.Entry<String, Repository.Factory> entry : newRepoTypes.entrySet()) {
if (internalFactories.put(entry.getKey(), entry.getValue()) != null) {
throw new IllegalArgumentException("Internal repository type [" + entry.getKey() + "] is already registered");
}
if (factories.put(entry.getKey(), entry.getValue()) != null) {
throw new IllegalArgumentException("Internal repository type [" + entry.getKey() + "] is already registered as a " +
"non-internal repository");
}
}
}
Map<String, Repository.Factory> repositoryTypes = Collections.unmodifiableMap(factories); Map<String, Repository.Factory> repositoryTypes = Collections.unmodifiableMap(factories);
repositoriesService = new RepositoriesService(env.settings(), clusterService, transportService, repositoryTypes, threadPool); Map<String, Repository.Factory> internalRepositoryTypes = Collections.unmodifiableMap(internalFactories);
repositoriesService = new RepositoriesService(env.settings(), clusterService, transportService, repositoryTypes,
internalRepositoryTypes, threadPool);
} }
@Override @Override

View File

@ -36,6 +36,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.snapshots.RestoreService;
import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -57,6 +58,7 @@ public class RepositoriesService implements ClusterStateApplier {
private static final Logger logger = LogManager.getLogger(RepositoriesService.class); private static final Logger logger = LogManager.getLogger(RepositoriesService.class);
private final Map<String, Repository.Factory> typesRegistry; private final Map<String, Repository.Factory> typesRegistry;
private final Map<String, Repository.Factory> internalTypesRegistry;
private final ClusterService clusterService; private final ClusterService clusterService;
@ -64,12 +66,14 @@ public class RepositoriesService implements ClusterStateApplier {
private final VerifyNodeRepositoryAction verifyAction; private final VerifyNodeRepositoryAction verifyAction;
private final Map<String, Repository> internalRepositories = ConcurrentCollections.newConcurrentMap();
private volatile Map<String, Repository> repositories = Collections.emptyMap(); private volatile Map<String, Repository> repositories = Collections.emptyMap();
public RepositoriesService(Settings settings, ClusterService clusterService, TransportService transportService, public RepositoriesService(Settings settings, ClusterService clusterService, TransportService transportService,
Map<String, Repository.Factory> typesRegistry, Map<String, Repository.Factory> typesRegistry, Map<String, Repository.Factory> internalTypesRegistry,
ThreadPool threadPool) { ThreadPool threadPool) {
this.typesRegistry = typesRegistry; this.typesRegistry = typesRegistry;
this.internalTypesRegistry = internalTypesRegistry;
this.clusterService = clusterService; this.clusterService = clusterService;
this.threadPool = threadPool; this.threadPool = threadPool;
// Doesn't make sense to maintain repositories on non-master and non-data nodes // Doesn't make sense to maintain repositories on non-master and non-data nodes
@ -101,7 +105,7 @@ public class RepositoriesService implements ClusterStateApplier {
// Trying to create the new repository on master to make sure it works // Trying to create the new repository on master to make sure it works
try { try {
closeRepository(createRepository(newRepositoryMetaData)); closeRepository(createRepository(newRepositoryMetaData, typesRegistry));
} catch (Exception e) { } catch (Exception e) {
registrationListener.onFailure(e); registrationListener.onFailure(e);
return; return;
@ -315,7 +319,7 @@ public class RepositoriesService implements ClusterStateApplier {
closeRepository(repository); closeRepository(repository);
repository = null; repository = null;
try { try {
repository = createRepository(repositoryMetaData); repository = createRepository(repositoryMetaData, typesRegistry);
} catch (RepositoryException ex) { } catch (RepositoryException ex) {
// TODO: this catch is bogus, it means the old repo is already closed, // TODO: this catch is bogus, it means the old repo is already closed,
// but we have nothing to replace it // but we have nothing to replace it
@ -324,7 +328,7 @@ public class RepositoriesService implements ClusterStateApplier {
} }
} else { } else {
try { try {
repository = createRepository(repositoryMetaData); repository = createRepository(repositoryMetaData, typesRegistry);
} catch (RepositoryException ex) { } catch (RepositoryException ex) {
logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex);
} }
@ -355,9 +359,37 @@ public class RepositoriesService implements ClusterStateApplier {
if (repository != null) { if (repository != null) {
return repository; return repository;
} }
repository = internalRepositories.get(repositoryName);
if (repository != null) {
return repository;
}
throw new RepositoryMissingException(repositoryName); throw new RepositoryMissingException(repositoryName);
} }
public void registerInternalRepository(String name, String type) {
RepositoryMetaData metaData = new RepositoryMetaData(name, type, Settings.EMPTY);
Repository repository = internalRepositories.computeIfAbsent(name, (n) -> {
logger.debug("put internal repository [{}][{}]", name, type);
return createRepository(metaData, internalTypesRegistry);
});
if (type.equals(repository.getMetadata().type()) == false) {
logger.warn(new ParameterizedMessage("internal repository [{}][{}] already registered. this prevented the registration of " +
"internal repository [{}][{}].", name, repository.getMetadata().type(), name, type));
} else if (repositories.containsKey(name)) {
logger.warn(new ParameterizedMessage("non-internal repository [{}] already registered. this repository will block the " +
"usage of internal repository [{}][{}].", name, metaData.type(), name));
}
}
public void unregisterInternalRepository(String name) {
Repository repository = internalRepositories.remove(name);
if (repository != null) {
RepositoryMetaData metadata = repository.getMetadata();
logger.debug(() -> new ParameterizedMessage("delete internal repository [{}][{}].", metadata.type(), name));
closeRepository(repository);
}
}
/** Closes the given repository. */ /** Closes the given repository. */
private void closeRepository(Repository repository) { private void closeRepository(Repository repository) {
logger.debug("closing repository [{}][{}]", repository.getMetadata().type(), repository.getMetadata().name()); logger.debug("closing repository [{}][{}]", repository.getMetadata().type(), repository.getMetadata().name());
@ -365,21 +397,21 @@ public class RepositoriesService implements ClusterStateApplier {
} }
/** /**
* Creates repository holder * Creates repository holder. This method starts the repository
*/ */
private Repository createRepository(RepositoryMetaData repositoryMetaData) { private Repository createRepository(RepositoryMetaData repositoryMetaData, Map<String, Repository.Factory> factories) {
logger.debug("creating repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()); logger.debug("creating repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name());
Repository.Factory factory = typesRegistry.get(repositoryMetaData.type()); Repository.Factory factory = factories.get(repositoryMetaData.type());
if (factory == null) { if (factory == null) {
throw new RepositoryException(repositoryMetaData.name(), throw new RepositoryException(repositoryMetaData.name(),
"repository type [" + repositoryMetaData.type() + "] does not exist"); "repository type [" + repositoryMetaData.type() + "] does not exist");
} }
try { try {
Repository repository = factory.create(repositoryMetaData, typesRegistry::get); Repository repository = factory.create(repositoryMetaData, factories::get);
repository.start(); repository.start();
return repository; return repository;
} catch (Exception e) { } catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); logger.warn(new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e);
throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e); throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e);
} }
} }

View File

@ -167,7 +167,7 @@ public abstract class RemoteClusterAware {
REMOTE_CLUSTERS_SEEDS); REMOTE_CLUSTERS_SEEDS);
protected final Settings settings; protected final Settings settings;
protected final ClusterNameExpressionResolver clusterNameResolver; private final ClusterNameExpressionResolver clusterNameResolver;
/** /**
* Creates a new {@link RemoteClusterAware} instance * Creates a new {@link RemoteClusterAware} instance
@ -242,14 +242,15 @@ public abstract class RemoteClusterAware {
* indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under * indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under
* {@link #LOCAL_CLUSTER_GROUP_KEY}. The returned map is mutable. * {@link #LOCAL_CLUSTER_GROUP_KEY}. The returned map is mutable.
* *
* @param remoteClusterNames the remote cluster names
* @param requestIndices the indices in the search request to filter * @param requestIndices the indices in the search request to filter
* @param indexExists a predicate that can test if a certain index or alias exists in the local cluster * @param indexExists a predicate that can test if a certain index or alias exists in the local cluster
* *
* @return a map of grouped remote and local indices * @return a map of grouped remote and local indices
*/ */
public Map<String, List<String>> groupClusterIndices(String[] requestIndices, Predicate<String> indexExists) { protected Map<String, List<String>> groupClusterIndices(Set<String> remoteClusterNames, String[] requestIndices,
Predicate<String> indexExists) {
Map<String, List<String>> perClusterIndices = new HashMap<>(); Map<String, List<String>> perClusterIndices = new HashMap<>();
Set<String> remoteClusterNames = getRemoteClusterNames();
for (String index : requestIndices) { for (String index : requestIndices) {
int i = index.indexOf(RemoteClusterService.REMOTE_CLUSTER_INDEX_SEPARATOR); int i = index.indexOf(RemoteClusterService.REMOTE_CLUSTER_INDEX_SEPARATOR);
if (i >= 0) { if (i >= 0) {
@ -281,9 +282,6 @@ public abstract class RemoteClusterAware {
return perClusterIndices; return perClusterIndices;
} }
protected abstract Set<String> getRemoteClusterNames();
/** /**
* Subclasses must implement this to receive information about updated cluster aliases. If the given address list is * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is
* empty the cluster alias is unregistered and should be removed. * empty the cluster alias is unregistered and should be removed.
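To illustrate the grouping contract of `groupClusterIndices` described above, a
hypothetical call might behave as follows (cluster aliases and index names are
invented for this example; `Sets` refers to `org.elasticsearch.common.util.set.Sets`):

// assuming remote clusters "cluster_1" and "cluster_2" are registered
Map<String, List<String>> grouped = groupClusterIndices(
    Sets.newHashSet("cluster_1", "cluster_2"),
    new String[]{"cluster_1:logs-*", "cluster_2:metrics", "local-index"},
    index -> false);
// grouped: {LOCAL_CLUSTER_GROUP_KEY=[local-index], cluster_1=[logs-*], cluster_2=[metrics]}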

View File

@ -278,7 +278,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
public Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate<String> indexExists) { public Map<String, OriginalIndices> groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate<String> indexExists) {
Map<String, OriginalIndices> originalIndicesMap = new HashMap<>(); Map<String, OriginalIndices> originalIndicesMap = new HashMap<>();
if (isCrossClusterSearchEnabled()) { if (isCrossClusterSearchEnabled()) {
final Map<String, List<String>> groupedIndices = groupClusterIndices(indices, indexExists); final Map<String, List<String>> groupedIndices = groupClusterIndices(getRemoteClusterNames(), indices, indexExists);
if (groupedIndices.isEmpty()) { if (groupedIndices.isEmpty()) {
//search on _all in the local cluster if neither local indices nor remote indices were specified //search on _all in the local cluster if neither local indices nor remote indices were specified
originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions)); originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions));
@ -380,8 +380,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
return connection; return connection;
} }
@Override Set<String> getRemoteClusterNames() {
protected Set<String> getRemoteClusterNames() {
return this.remoteClusters.keySet(); return this.remoteClusters.keySet();
} }

View File

@ -461,7 +461,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
Collections.emptySet()); Collections.emptySet());
final ClusterService clusterService = mock(ClusterService.class); final ClusterService clusterService = mock(ClusterService.class);
final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService, final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService,
transportService, null, threadPool); transportService, Collections.emptyMap(), Collections.emptyMap(), threadPool);
final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(threadPool, final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(threadPool,
transportService, null, clusterService); transportService, null, clusterService);
final ShardStateAction shardStateAction = mock(ShardStateAction.class); final ShardStateAction shardStateAction = mock(ShardStateAction.class);

View File

@ -0,0 +1,101 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class RepositoriesModuleTests extends ESTestCase {
private Environment environment;
private NamedXContentRegistry contentRegistry;
private List<RepositoryPlugin> repoPlugins = new ArrayList<>();
private RepositoryPlugin plugin1;
private RepositoryPlugin plugin2;
private Repository.Factory factory;
@Override
public void setUp() throws Exception {
super.setUp();
environment = mock(Environment.class);
contentRegistry = mock(NamedXContentRegistry.class);
plugin1 = mock(RepositoryPlugin.class);
plugin2 = mock(RepositoryPlugin.class);
factory = mock(Repository.Factory.class);
repoPlugins.add(plugin1);
repoPlugins.add(plugin2);
when(environment.settings()).thenReturn(Settings.EMPTY);
}
public void testCanRegisterTwoRepositoriesWithDifferentTypes() {
when(plugin1.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory));
when(plugin2.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type2", factory));
// Would throw an IllegalArgumentException if the two repository types clashed
new RepositoriesModule(environment, repoPlugins, mock(TransportService.class), mock(ClusterService.class),
mock(ThreadPool.class), contentRegistry);
}
public void testCannotRegisterTwoRepositoriesWithSameTypes() {
when(plugin1.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory));
when(plugin2.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory));
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
() -> new RepositoriesModule(environment, repoPlugins, mock(TransportService.class), mock(ClusterService.class),
mock(ThreadPool.class), contentRegistry));
assertEquals("Repository type [type1] is already registered", ex.getMessage());
}
public void testCannotRegisterTwoInternalRepositoriesWithSameTypes() {
when(plugin1.getInternalRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory));
when(plugin2.getInternalRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory));
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
() -> new RepositoriesModule(environment, repoPlugins, mock(TransportService.class), mock(ClusterService.class),
mock(ThreadPool.class), contentRegistry));
assertEquals("Internal repository type [type1] is already registered", ex.getMessage());
}
public void testCannotRegisterNormalAndInternalRepositoriesWithSameTypes() {
when(plugin1.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory));
when(plugin2.getInternalRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory));
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
() -> new RepositoriesModule(environment, repoPlugins, mock(TransportService.class), mock(ClusterService.class),
mock(ThreadPool.class), contentRegistry));
assertEquals("Internal repository type [type1] is already registered as a non-internal repository", ex.getMessage());
}
}

View File

@ -0,0 +1,233 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories;
import org.apache.lucene.index.IndexCommit;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import static org.mockito.Mockito.mock;
public class RepositoriesServiceTests extends ESTestCase {
private RepositoriesService repositoriesService;
@Override
public void setUp() throws Exception {
super.setUp();
ThreadPool threadPool = mock(ThreadPool.class);
final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), threadPool,
TransportService.NOOP_TRANSPORT_INTERCEPTOR,
boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null,
Collections.emptySet());
repositoriesService = new RepositoriesService(Settings.EMPTY, mock(ClusterService.class),
transportService, Collections.emptyMap(), Collections.singletonMap(TestRepository.TYPE, TestRepository::new), threadPool);
}
public void testRegisterInternalRepository() {
String repoName = "name";
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName));
repositoriesService.registerInternalRepository(repoName, TestRepository.TYPE);
Repository repository = repositoriesService.repository(repoName);
assertEquals(repoName, repository.getMetadata().name());
assertEquals(TestRepository.TYPE, repository.getMetadata().type());
assertEquals(Settings.EMPTY, repository.getMetadata().settings());
assertTrue(((TestRepository) repository).isStarted);
}
public void testUnregisterInternalRepository() {
String repoName = "name";
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName));
repositoriesService.registerInternalRepository(repoName, TestRepository.TYPE);
Repository repository = repositoriesService.repository(repoName);
assertFalse(((TestRepository) repository).isClosed);
repositoriesService.unregisterInternalRepository(repoName);
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName));
assertTrue(((TestRepository) repository).isClosed);
}
public void testRegisterWillNotUpdateIfInternalRepositoryWithNameExists() {
String repoName = "name";
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName));
repositoriesService.registerInternalRepository(repoName, TestRepository.TYPE);
Repository repository = repositoriesService.repository(repoName);
assertFalse(((TestRepository) repository).isClosed);
repositoriesService.registerInternalRepository(repoName, TestRepository.TYPE);
assertFalse(((TestRepository) repository).isClosed);
Repository repository2 = repositoriesService.repository(repoName);
assertSame(repository, repository2);
}
private static class TestRepository implements Repository {
private static final String TYPE = "internal";
private boolean isClosed;
private boolean isStarted;
private final RepositoryMetaData metaData;
private TestRepository(RepositoryMetaData metaData) {
this.metaData = metaData;
}
@Override
public RepositoryMetaData getMetadata() {
return metaData;
}
@Override
public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) {
return null;
}
@Override
public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) {
return null;
}
@Override
public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException {
return null;
}
@Override
public RepositoryData getRepositoryData() {
return null;
}
@Override
public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData metaData) {
}
@Override
public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long startTime, String failure,
int totalShards, List<SnapshotShardFailure> shardFailures, long repositoryStateId,
boolean includeGlobalState) {
return null;
}
@Override
public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
}
@Override
public long getSnapshotThrottleTimeInNanos() {
return 0;
}
@Override
public long getRestoreThrottleTimeInNanos() {
return 0;
}
@Override
public String startVerification() {
return null;
}
@Override
public void endVerification(String verificationToken) {
}
@Override
public void verify(String verificationToken, DiscoveryNode localNode) {
}
@Override
public boolean isReadOnly() {
return false;
}
@Override
public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
IndexShardSnapshotStatus snapshotStatus) {
}
@Override
public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
RecoveryState recoveryState) {
}
@Override
public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) {
return null;
}
@Override
public Lifecycle.State lifecycleState() {
return null;
}
@Override
public void addLifecycleListener(LifecycleListener listener) {
}
@Override
public void removeLifecycleListener(LifecycleListener listener) {
}
@Override
public void start() {
isStarted = true;
}
@Override
public void stop() {
}
@Override
public void close() {
isClosed = true;
}
}
}

View File

@ -219,8 +219,9 @@ public class RemoteClusterServiceTests extends ESTestCase {
assertTrue(service.isRemoteClusterRegistered("cluster_1")); assertTrue(service.isRemoteClusterRegistered("cluster_1"));
assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertTrue(service.isRemoteClusterRegistered("cluster_2"));
assertFalse(service.isRemoteClusterRegistered("foo")); assertFalse(service.isRemoteClusterRegistered("foo"));
Map<String, List<String>> perClusterIndices = service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", Map<String, List<String>> perClusterIndices = service.groupClusterIndices(service.getRemoteClusterNames(),
"cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", "cluster*:baz", "*:boo", "no*match:boo"}, new String[]{"foo:bar", "cluster_1:bar", "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo",
"cluster*:baz", "*:boo", "no*match:boo"},
i -> false); i -> false);
List<String> localIndices = perClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); List<String> localIndices = perClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
assertNotNull(localIndices); assertNotNull(localIndices);
@ -230,7 +231,7 @@ public class RemoteClusterServiceTests extends ESTestCase {
assertEquals(Arrays.asList("foo:bar", "foo*", "baz", "boo"), perClusterIndices.get("cluster_2")); assertEquals(Arrays.asList("foo:bar", "foo*", "baz", "boo"), perClusterIndices.get("cluster_2"));
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", service.groupClusterIndices(service.getRemoteClusterNames(), new String[]{"foo:bar", "cluster_1:bar",
"cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, "cluster_1:bar"::equals)); "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, "cluster_1:bar"::equals));
assertEquals("Can not filter indices; index cluster_1:bar exists but there is also a remote cluster named:" + assertEquals("Can not filter indices; index cluster_1:bar exists but there is also a remote cluster named:" +
@ -277,7 +278,7 @@ public class RemoteClusterServiceTests extends ESTestCase {
} }
{ {
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", service.groupClusterIndices(service.getRemoteClusterNames(), new String[]{"foo:bar", "cluster_1:bar",
"cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, "cluster_1:bar"::equals)); "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, "cluster_1:bar"::equals));
assertEquals("Can not filter indices; index cluster_1:bar exists but there is also a remote cluster named:" + assertEquals("Can not filter indices; index cluster_1:bar exists but there is also a remote cluster named:" +
" cluster_1", iae.getMessage()); " cluster_1", iae.getMessage());

View File

@ -157,5 +157,9 @@ include::authentication/configuring-kerberos-realm.asciidoc[]
include::fips-140-compliance.asciidoc[] include::fips-140-compliance.asciidoc[]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/settings/security-settings.asciidoc :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/settings/security-settings.asciidoc
include::{es-repo-dir}/settings/security-settings.asciidoc[] include::{es-repo-dir}/settings/security-settings.asciidoc[]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/reference/files.asciidoc
include::{es-repo-dir}/security/reference/files.asciidoc[]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/settings/audit-settings.asciidoc :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/settings/audit-settings.asciidoc
include::{es-repo-dir}/settings/audit-settings.asciidoc[] include::{es-repo-dir}/settings/audit-settings.asciidoc[]

View File

@ -1,11 +0,0 @@
[role="xpack"]
[[security-reference]]
== Reference
* <<security-privileges, Security Privileges>>
* {ref}/security-settings.html[Security Settings]
* <<security-files, Security Files>>
* {ref}/security-api.html[Security API]
* {ref}/xpack-commands.html[Security Commands]
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/reference/files.asciidoc
include::{es-repo-dir}/security/reference/files.asciidoc[]

View File

@ -31,15 +31,17 @@
- do: - do:
ccr.get_auto_follow_pattern: ccr.get_auto_follow_pattern:
name: my_pattern name: my_pattern
- match: { my_pattern.remote_cluster: 'local' } - match: { patterns.0.name: 'my_pattern' }
- match: { my_pattern.leader_index_patterns: ['logs-*'] } - match: { patterns.0.pattern.remote_cluster: 'local' }
- match: { my_pattern.max_outstanding_read_requests: 2 } - match: { patterns.0.pattern.leader_index_patterns: ['logs-*'] }
- match: { patterns.0.pattern.max_outstanding_read_requests: 2 }
- do: - do:
ccr.get_auto_follow_pattern: {} ccr.get_auto_follow_pattern: {}
- match: { my_pattern.remote_cluster: 'local' } - match: { patterns.0.name: 'my_pattern' }
- match: { my_pattern.leader_index_patterns: ['logs-*'] } - match: { patterns.0.pattern.remote_cluster: 'local' }
- match: { my_pattern.max_outstanding_read_requests: 2 } - match: { patterns.0.pattern.leader_index_patterns: ['logs-*'] }
- match: { patterns.0.pattern.max_outstanding_read_requests: 2 }
- do: - do:
ccr.delete_auto_follow_pattern: ccr.delete_auto_follow_pattern:

View File

@ -6,9 +6,11 @@
package org.elasticsearch.xpack.ccr; package org.elasticsearch.xpack.ccr;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
@ -32,6 +34,8 @@ import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.EnginePlugin;
import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
@ -41,46 +45,50 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.watcher.ResourceWatcherService;
import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator;
import org.elasticsearch.xpack.ccr.action.TransportGetAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.TransportUnfollowAction;
import org.elasticsearch.xpack.ccr.rest.RestGetAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction;
import org.elasticsearch.xpack.ccr.rest.RestCcrStatsAction;
import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction;
import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction;
import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction;
import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction;
import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.ShardChangesAction; import org.elasticsearch.xpack.ccr.action.ShardChangesAction;
import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTask;
import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor; import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor;
import org.elasticsearch.xpack.ccr.action.TransportFollowStatsAction; import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction;
import org.elasticsearch.xpack.ccr.action.TransportPutFollowAction;
import org.elasticsearch.xpack.ccr.action.TransportDeleteAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.TransportDeleteAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.TransportResumeFollowAction; import org.elasticsearch.xpack.ccr.action.TransportFollowStatsAction;
import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.TransportGetAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.TransportPauseFollowAction; import org.elasticsearch.xpack.ccr.action.TransportPauseFollowAction;
import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.action.TransportPutFollowAction;
import org.elasticsearch.xpack.ccr.action.TransportResumeFollowAction;
import org.elasticsearch.xpack.ccr.action.TransportUnfollowAction;
import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction;
import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction; import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction;
import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryAction;
import org.elasticsearch.xpack.ccr.action.repositories.PutInternalCcrRepositoryAction;
import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory;
import org.elasticsearch.xpack.ccr.rest.RestFollowStatsAction; import org.elasticsearch.xpack.ccr.repository.CcrRepository;
import org.elasticsearch.xpack.ccr.rest.RestPutFollowAction; import org.elasticsearch.xpack.ccr.rest.RestCcrStatsAction;
import org.elasticsearch.xpack.ccr.rest.RestDeleteAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestDeleteAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.rest.RestResumeFollowAction; import org.elasticsearch.xpack.ccr.rest.RestFollowStatsAction;
import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestGetAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.rest.RestPauseFollowAction; import org.elasticsearch.xpack.ccr.rest.RestPauseFollowAction;
import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction;
import org.elasticsearch.xpack.ccr.rest.RestPutFollowAction;
import org.elasticsearch.xpack.ccr.rest.RestResumeFollowAction;
import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction;
import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.XPackPlugin;
import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus;
import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction;
import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction;
import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction;
import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction;
import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction;
import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction;
import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction;
import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction;
import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction;
import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; import org.elasticsearch.xpack.core.ccr.action.UnfollowAction;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.Optional; import java.util.Optional;
import java.util.function.Supplier; import java.util.function.Supplier;
@ -92,7 +100,7 @@ import static org.elasticsearch.xpack.core.XPackSettings.CCR_ENABLED_SETTING;
/** /**
* Container class for CCR functionality. * Container class for CCR functionality.
*/ */
public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, EnginePlugin { public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, EnginePlugin, RepositoryPlugin {
public static final String CCR_THREAD_POOL_NAME = "ccr"; public static final String CCR_THREAD_POOL_NAME = "ccr";
public static final String CCR_CUSTOM_METADATA_KEY = "ccr"; public static final String CCR_CUSTOM_METADATA_KEY = "ccr";
@ -104,6 +112,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E
private final boolean enabled; private final boolean enabled;
private final Settings settings; private final Settings settings;
private final CcrLicenseChecker ccrLicenseChecker; private final CcrLicenseChecker ccrLicenseChecker;
private final SetOnce<CcrRepositoryManager> repositoryManager = new SetOnce<>();
/** /**
* Construct an instance of the CCR container with the specified settings. * Construct an instance of the CCR container with the specified settings.
@ -142,6 +151,8 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E
return emptyList(); return emptyList();
} }
this.repositoryManager.set(new CcrRepositoryManager(settings, clusterService, (NodeClient) client));
return Arrays.asList( return Arrays.asList(
ccrLicenseChecker, ccrLicenseChecker,
new AutoFollowCoordinator(settings, client, threadPool, clusterService, ccrLicenseChecker) new AutoFollowCoordinator(settings, client, threadPool, clusterService, ccrLicenseChecker)
@ -166,6 +177,10 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E
// internal actions // internal actions
new ActionHandler<>(BulkShardOperationsAction.INSTANCE, TransportBulkShardOperationsAction.class), new ActionHandler<>(BulkShardOperationsAction.INSTANCE, TransportBulkShardOperationsAction.class),
new ActionHandler<>(ShardChangesAction.INSTANCE, ShardChangesAction.TransportAction.class), new ActionHandler<>(ShardChangesAction.INSTANCE, ShardChangesAction.TransportAction.class),
new ActionHandler<>(PutInternalCcrRepositoryAction.INSTANCE,
PutInternalCcrRepositoryAction.TransportPutInternalRepositoryAction.class),
new ActionHandler<>(DeleteInternalCcrRepositoryAction.INSTANCE,
DeleteInternalCcrRepositoryAction.TransportDeleteInternalRepositoryAction.class),
// stats action // stats action
new ActionHandler<>(FollowStatsAction.INSTANCE, TransportFollowStatsAction.class), new ActionHandler<>(FollowStatsAction.INSTANCE, TransportFollowStatsAction.class),
new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class), new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class),
@ -259,6 +274,12 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E
return Collections.singletonList(new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, 32, 100, "xpack.ccr.ccr_thread_pool")); return Collections.singletonList(new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, 32, 100, "xpack.ccr.ccr_thread_pool"));
} }
@Override
public Map<String, Repository.Factory> getInternalRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
Repository.Factory repositoryFactory = (metadata) -> new CcrRepository(metadata, settings);
return Collections.singletonMap(CcrRepository.TYPE, repositoryFactory);
}
protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); }
} }
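To make the new getInternalRepositories hook concrete, here is a minimal sketch (not part of the commit) of how such a factory is exercised; RepositoriesService normally drives this, and the repository name and Settings.EMPTY are placeholder values.

    // Hedged sketch; assumes imports for RepositoryMetaData, Settings, Repository and CcrRepository.
    static Repository newInternalCcrRepository() throws Exception {
        // Same factory shape as registered in Ccr#getInternalRepositories above.
        Repository.Factory factory = metadata -> new CcrRepository(metadata, Settings.EMPTY);
        return factory.create(new RepositoryMetaData("_ccr_leader_cluster", CcrRepository.TYPE, Settings.EMPTY));
    }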

View File

@ -0,0 +1,48 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.RemoteClusterAware;
import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryAction;
import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryRequest;
import org.elasticsearch.xpack.ccr.action.repositories.PutInternalCcrRepositoryAction;
import org.elasticsearch.xpack.ccr.action.repositories.PutInternalCcrRepositoryRequest;
import org.elasticsearch.xpack.ccr.repository.CcrRepository;
import java.util.List;
class CcrRepositoryManager extends RemoteClusterAware {
private final NodeClient client;
CcrRepositoryManager(Settings settings, ClusterService clusterService, NodeClient client) {
super(settings);
this.client = client;
listenForUpdates(clusterService.getClusterSettings());
}
@Override
protected void updateRemoteCluster(String clusterAlias, List<String> addresses, String proxyAddress) {
String repositoryName = CcrRepository.NAME_PREFIX + clusterAlias;
if (addresses.isEmpty()) {
DeleteInternalCcrRepositoryRequest request = new DeleteInternalCcrRepositoryRequest(repositoryName);
PlainActionFuture<DeleteInternalCcrRepositoryAction.DeleteInternalCcrRepositoryResponse> f = PlainActionFuture.newFuture();
client.executeLocally(DeleteInternalCcrRepositoryAction.INSTANCE, request, f);
assert f.isDone() : "Should be completed as it is executed synchronously";
} else {
ActionRequest request = new PutInternalCcrRepositoryRequest(repositoryName, CcrRepository.TYPE);
PlainActionFuture<PutInternalCcrRepositoryAction.PutInternalCcrRepositoryResponse> f = PlainActionFuture.newFuture();
client.executeLocally(PutInternalCcrRepositoryAction.INSTANCE, request, f);
assert f.isDone() : "Should be completed as it is executed synchronously";
}
}
}
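For orientation, a minimal sketch of the mapping this manager maintains; the alias below is hypothetical.

    // Illustrative only: "leader_cluster" is a hypothetical remote cluster alias.
    String alias = "leader_cluster";
    String repositoryName = CcrRepository.NAME_PREFIX + alias; // -> "_ccr_leader_cluster"
    // When the alias gains seed addresses, updateRemoteCluster puts an internal repository of
    // type CcrRepository.TYPE under that name; when the seeds are cleared it deletes it again.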

View File

@ -0,0 +1,72 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr.action.repositories;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
public class DeleteInternalCcrRepositoryAction extends Action<DeleteInternalCcrRepositoryAction.DeleteInternalCcrRepositoryResponse> {
public static final DeleteInternalCcrRepositoryAction INSTANCE = new DeleteInternalCcrRepositoryAction();
public static final String NAME = "cluster:admin/ccr/internal_repository/delete";
private DeleteInternalCcrRepositoryAction() {
super(NAME);
}
@Override
public DeleteInternalCcrRepositoryResponse newResponse() {
throw new UnsupportedOperationException();
}
@Override
public Writeable.Reader<DeleteInternalCcrRepositoryResponse> getResponseReader() {
return DeleteInternalCcrRepositoryResponse::new;
}
public static class TransportDeleteInternalRepositoryAction
extends TransportAction<DeleteInternalCcrRepositoryRequest, DeleteInternalCcrRepositoryResponse> {
private final RepositoriesService repositoriesService;
@Inject
public TransportDeleteInternalRepositoryAction(RepositoriesService repositoriesService, ActionFilters actionFilters,
TransportService transportService) {
super(NAME, actionFilters, transportService.getTaskManager());
this.repositoriesService = repositoriesService;
}
@Override
protected void doExecute(Task task, DeleteInternalCcrRepositoryRequest request,
ActionListener<DeleteInternalCcrRepositoryResponse> listener) {
repositoriesService.unregisterInternalRepository(request.getName());
listener.onResponse(new DeleteInternalCcrRepositoryResponse());
}
}
public static class DeleteInternalCcrRepositoryResponse extends ActionResponse {
DeleteInternalCcrRepositoryResponse() {
super();
}
DeleteInternalCcrRepositoryResponse(StreamInput streamInput) throws IOException {
super(streamInput);
}
}
}

View File

@ -0,0 +1,63 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr.action.repositories;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
import java.util.Objects;
public class DeleteInternalCcrRepositoryRequest extends ActionRequest {
private final String name;
public DeleteInternalCcrRepositoryRequest(String name) {
this.name = Objects.requireNonNull(name);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
throw new UnsupportedOperationException("DeleteInternalRepositoryRequest cannot be serialized for sending across the wire.");
}
@Override
public void writeTo(StreamOutput out) throws IOException {
throw new UnsupportedOperationException("DeleteInternalRepositoryRequest cannot be serialized for sending across the wire.");
}
public String getName() {
return name;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
DeleteInternalCcrRepositoryRequest that = (DeleteInternalCcrRepositoryRequest) o;
return Objects.equals(name, that.name);
}
@Override
public int hashCode() {
return Objects.hash(name);
}
@Override
public String toString() {
return "DeleteInternalRepositoryRequest{" +
"name='" + name + '\'' +
'}';
}
}

View File

@ -0,0 +1,72 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr.action.repositories;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
public class PutInternalCcrRepositoryAction extends Action<PutInternalCcrRepositoryAction.PutInternalCcrRepositoryResponse> {
public static final PutInternalCcrRepositoryAction INSTANCE = new PutInternalCcrRepositoryAction();
public static final String NAME = "cluster:admin/ccr/internal_repository/put";
private PutInternalCcrRepositoryAction() {
super(NAME);
}
@Override
public PutInternalCcrRepositoryResponse newResponse() {
throw new UnsupportedOperationException();
}
@Override
public Writeable.Reader<PutInternalCcrRepositoryResponse> getResponseReader() {
return PutInternalCcrRepositoryResponse::new;
}
public static class TransportPutInternalRepositoryAction
extends TransportAction<PutInternalCcrRepositoryRequest, PutInternalCcrRepositoryResponse> {
private final RepositoriesService repositoriesService;
@Inject
public TransportPutInternalRepositoryAction(RepositoriesService repositoriesService, ActionFilters actionFilters,
TransportService transportService) {
super(NAME, actionFilters, transportService.getTaskManager());
this.repositoriesService = repositoriesService;
}
@Override
protected void doExecute(Task task, PutInternalCcrRepositoryRequest request,
ActionListener<PutInternalCcrRepositoryResponse> listener) {
repositoriesService.registerInternalRepository(request.getName(), request.getType());
listener.onResponse(new PutInternalCcrRepositoryResponse());
}
}
public static class PutInternalCcrRepositoryResponse extends ActionResponse {
PutInternalCcrRepositoryResponse() {
super();
}
PutInternalCcrRepositoryResponse(StreamInput streamInput) throws IOException {
super(streamInput);
}
}
}

View File

@ -0,0 +1,71 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr.action.repositories;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
import java.util.Objects;
public class PutInternalCcrRepositoryRequest extends ActionRequest {
private final String name;
private final String type;
public PutInternalCcrRepositoryRequest(String name, String type) {
this.name = Objects.requireNonNull(name);
this.type = Objects.requireNonNull(type);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
public void readFrom(StreamInput in) throws IOException {
throw new UnsupportedOperationException("PutInternalRepositoryRequest cannot be serialized for sending across the wire.");
}
@Override
public void writeTo(StreamOutput out) throws IOException {
throw new UnsupportedOperationException("PutInternalRepositoryRequest cannot be serialized for sending across the wire.");
}
public String getName() {
return name;
}
public String getType() {
return type;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PutInternalCcrRepositoryRequest that = (PutInternalCcrRepositoryRequest) o;
return Objects.equals(name, that.name) &&
Objects.equals(type, that.type);
}
@Override
public int hashCode() {
return Objects.hash(name, type);
}
@Override
public String toString() {
return "PutInternalCcrRepositoryRequest{" +
"name='" + name + '\'' +
", type='" + type + '\'' +
'}';
}
}
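Both internal repository actions are strictly local: their requests refuse to serialize, so they can only be executed on the node that created them. A minimal sketch of how they are driven, mirroring CcrRepositoryManager above; the client variable is an assumption, not part of this diff.

    // Hedged sketch, assuming "client" is a NodeClient on the local node.
    PutInternalCcrRepositoryRequest request =
            new PutInternalCcrRepositoryRequest("_ccr_leader_cluster", CcrRepository.TYPE);
    PlainActionFuture<PutInternalCcrRepositoryAction.PutInternalCcrRepositoryResponse> future =
            PlainActionFuture.newFuture();
    client.executeLocally(PutInternalCcrRepositoryAction.INSTANCE, request, future);
    future.actionGet(); // completes synchronously because execution never leaves the node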

View File

@ -0,0 +1,149 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr.repository;
import org.apache.lucene.index.IndexCommit;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import java.io.IOException;
import java.util.List;
/**
 * This repository relies on a remote cluster for CCR restores. It is read-only, so it can only be used to
 * restore shards/indices that exist on the remote cluster.

*/
public class CcrRepository extends AbstractLifecycleComponent implements Repository {
public static final String TYPE = "_ccr_";
public static final String NAME_PREFIX = "_ccr_";
private final RepositoryMetaData metadata;
public CcrRepository(RepositoryMetaData metadata, Settings settings) {
super(settings);
this.metadata = metadata;
}
@Override
protected void doStart() {
}
@Override
protected void doStop() {
}
@Override
protected void doClose() throws IOException {
}
@Override
public RepositoryMetaData getMetadata() {
return metadata;
}
@Override
public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public RepositoryData getRepositoryData() {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData metaData) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<IndexId> indices, long startTime, String failure, int totalShards,
List<SnapshotShardFailure> shardFailures, long repositoryStateId, boolean includeGlobalState) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public long getSnapshotThrottleTimeInNanos() {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public long getRestoreThrottleTimeInNanos() {
return 0;
}
@Override
public String startVerification() {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public void endVerification(String verificationToken) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public void verify(String verificationToken, DiscoveryNode localNode) {
}
@Override
public boolean isReadOnly() {
return true;
}
@Override
public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit,
IndexShardSnapshotStatus snapshotStatus) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId,
RecoveryState recoveryState) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
@Override
public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) {
throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE);
}
}
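At this point the repository is a stub: besides the lifecycle hooks, only getMetadata(), verify(), isReadOnly() and getRestoreThrottleTimeInNanos() return normally, while every snapshot-side method throws UnsupportedOperationException. A minimal sketch of the surface a caller can rely on, given some CcrRepository instance named repository (a hypothetical variable):

    // Hedged sketch of the currently usable surface.
    assert repository.isReadOnly();                         // always true for this repository type
    assert repository.getRestoreThrottleTimeInNanos() == 0; // restores are not throttled
    // getRepositoryData(), snapshotShard(...), restoreShard(...) and the other snapshot methods
    // all throw UnsupportedOperationException until the restore-from-remote work is completed.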

View File

@ -0,0 +1,62 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ccr;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.CcrIntegTestCase;
import org.elasticsearch.xpack.ccr.repository.CcrRepository;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
// TODO: Fold this integration test into a more expansive integration test as more
// bootstrap-from-remote work is completed.
public class CcrRepositoryManagerIT extends CcrIntegTestCase {
public void testThatRepositoryIsPutAndRemovedWhenRemoteClusterIsUpdated() throws Exception {
String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster";
final RepositoriesService repositoriesService =
getFollowerCluster().getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next();
try {
Repository repository = repositoriesService.repository(leaderClusterRepoName);
assertEquals(CcrRepository.TYPE, repository.getMetadata().type());
assertEquals(leaderClusterRepoName, repository.getMetadata().name());
} catch (RepositoryMissingException e) {
fail("need repository");
}
ClusterUpdateSettingsRequest putFollowerRequest = new ClusterUpdateSettingsRequest();
String address = getFollowerCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString();
putFollowerRequest.persistentSettings(Settings.builder().put("cluster.remote.follower_cluster_copy.seeds", address));
assertAcked(followerClient().admin().cluster().updateSettings(putFollowerRequest).actionGet());
String followerCopyRepoName = CcrRepository.NAME_PREFIX + "follower_cluster_copy";
try {
Repository repository = repositoriesService.repository(followerCopyRepoName);
assertEquals(CcrRepository.TYPE, repository.getMetadata().type());
assertEquals(followerCopyRepoName, repository.getMetadata().name());
} catch (RepositoryMissingException e) {
fail("need repository");
}
ClusterUpdateSettingsRequest deleteLeaderRequest = new ClusterUpdateSettingsRequest();
deleteLeaderRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", ""));
assertAcked(followerClient().admin().cluster().updateSettings(deleteLeaderRequest).actionGet());
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(leaderClusterRepoName));
ClusterUpdateSettingsRequest deleteFollowerRequest = new ClusterUpdateSettingsRequest();
deleteFollowerRequest.persistentSettings(Settings.builder().put("cluster.remote.follower_cluster_copy.seeds", ""));
assertAcked(followerClient().admin().cluster().updateSettings(deleteFollowerRequest).actionGet());
expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(followerCopyRepoName));
}
}

View File

@ -111,10 +111,21 @@ public class GetAutoFollowPatternAction extends Action<GetAutoFollowPatternActio
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); builder.startObject();
for (Map.Entry<String, AutoFollowPattern> entry : autoFollowPatterns.entrySet()) { {
builder.startObject(entry.getKey()); builder.startArray("patterns");
entry.getValue().toXContent(builder, params); for (Map.Entry<String, AutoFollowPattern> entry : autoFollowPatterns.entrySet()) {
builder.endObject(); builder.startObject();
{
builder.field("name", entry.getKey());
builder.startObject("pattern");
{
entry.getValue().toXContent(builder, params);
}
builder.endObject();
}
builder.endObject();
}
builder.endArray();
} }
builder.endObject(); builder.endObject();
return builder; return builder;
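The rewritten toXContent replaces the name-keyed object with a "patterns" array whose entries carry an explicit "name" and a nested "pattern" object. A minimal, self-contained sketch of the resulting shape; the pattern name and body below are placeholders, since the real body comes from AutoFollowPattern#toXContent.

    // Hedged sketch; assumes imports for XContentBuilder, XContentFactory, Strings and IOException.
    static String samplePatternsJson() throws IOException {
        try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
            builder.startObject();
            {
                builder.startArray("patterns");
                builder.startObject();
                {
                    builder.field("name", "logs");                    // illustrative pattern name
                    builder.startObject("pattern");
                    {
                        builder.array("leader_index_patterns", "logs-*"); // placeholder body
                    }
                    builder.endObject();
                }
                builder.endObject();
                builder.endArray();
            }
            builder.endObject();
            return Strings.toString(builder);
            // -> {"patterns":[{"name":"logs","pattern":{"leader_index_patterns":["logs-*"]}}]}
        }
    }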

View File

@ -53,7 +53,9 @@ import org.elasticsearch.plugins.MapperPlugin;
import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.RepositoryPlugin;
import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptContext;
@ -393,6 +395,21 @@ public class LocalStateCompositeXPackPlugin extends XPackPlugin implements Scrip
.collect(toList()); .collect(toList());
} }
@Override
public Map<String, Repository.Factory> getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
HashMap<String, Repository.Factory> repositories = new HashMap<>(super.getRepositories(env, namedXContentRegistry));
filterPlugins(RepositoryPlugin.class).forEach(r -> repositories.putAll(r.getRepositories(env, namedXContentRegistry)));
return repositories;
}
@Override
public Map<String, Repository.Factory> getInternalRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) {
HashMap<String, Repository.Factory> internalRepositories = new HashMap<>(super.getInternalRepositories(env, namedXContentRegistry));
filterPlugins(RepositoryPlugin.class).forEach(r ->
internalRepositories.putAll(r.getInternalRepositories(env, namedXContentRegistry)));
return internalRepositories;
}
@Override @Override
public void close() throws IOException { public void close() throws IOException {
IOUtils.close(plugins); IOUtils.close(plugins);

View File

@ -104,10 +104,10 @@ public class ESNativeRealmMigrateTool extends LoggingAwareMultiCommand {
super("Migrates users or roles from file to native realm"); super("Migrates users or roles from file to native realm");
this.username = parser.acceptsAll(Arrays.asList("u", "username"), this.username = parser.acceptsAll(Arrays.asList("u", "username"),
"User used to authenticate with Elasticsearch") "User used to authenticate with Elasticsearch")
.withRequiredArg(); .withRequiredArg().required();
this.password = parser.acceptsAll(Arrays.asList("p", "password"), this.password = parser.acceptsAll(Arrays.asList("p", "password"),
"Password used to authenticate with Elasticsearch") "Password used to authenticate with Elasticsearch")
.withRequiredArg(); .withRequiredArg().required();
this.url = parser.acceptsAll(Arrays.asList("U", "url"), this.url = parser.acceptsAll(Arrays.asList("U", "url"),
"URL of Elasticsearch host") "URL of Elasticsearch host")
.withRequiredArg(); .withRequiredArg();

View File

@ -70,7 +70,7 @@ class IndicesAndAliasesResolver {
* then the index names will be categorized into those that refer to {@link ResolvedIndices#getLocal() local indices}, and those that * then the index names will be categorized into those that refer to {@link ResolvedIndices#getLocal() local indices}, and those that
* refer to {@link ResolvedIndices#getRemote() remote indices}. This categorization follows the standard * refer to {@link ResolvedIndices#getRemote() remote indices}. This categorization follows the standard
* {@link RemoteClusterAware#buildRemoteIndexName(String, String) remote index-name format} and also respects the currently defined * {@link RemoteClusterAware#buildRemoteIndexName(String, String) remote index-name format} and also respects the currently defined
* {@link RemoteClusterAware#getRemoteClusterNames() remote clusters}. * remote clusters.
* </p><br> * </p><br>
* Thus an index name <em>N</em> will be considered to be <em>remote</em> if-and-only-if all of the following are true * Thus an index name <em>N</em> will be considered to be <em>remote</em> if-and-only-if all of the following are true
* <ul> * <ul>
@ -438,11 +438,6 @@ class IndicesAndAliasesResolver {
listenForUpdates(clusterSettings); listenForUpdates(clusterSettings);
} }
@Override
protected Set<String> getRemoteClusterNames() {
return clusters;
}
@Override @Override
protected void updateRemoteCluster(String clusterAlias, List<String> addresses, String proxyAddress) { protected void updateRemoteCluster(String clusterAlias, List<String> addresses, String proxyAddress) {
if (addresses.isEmpty()) { if (addresses.isEmpty()) {
@ -453,7 +448,7 @@ class IndicesAndAliasesResolver {
} }
ResolvedIndices splitLocalAndRemoteIndexNames(String... indices) { ResolvedIndices splitLocalAndRemoteIndexNames(String... indices) {
final Map<String, List<String>> map = super.groupClusterIndices(indices, exists -> false); final Map<String, List<String>> map = super.groupClusterIndices(clusters, indices, exists -> false);
final List<String> local = map.remove(LOCAL_CLUSTER_GROUP_KEY); final List<String> local = map.remove(LOCAL_CLUSTER_GROUP_KEY);
final List<String> remote = map.entrySet().stream() final List<String> remote = map.entrySet().stream()
.flatMap(e -> e.getValue().stream().map(v -> e.getKey() + REMOTE_CLUSTER_INDEX_SEPARATOR + v)) .flatMap(e -> e.getValue().stream().map(v -> e.getKey() + REMOTE_CLUSTER_INDEX_SEPARATOR + v))

View File

@ -5,6 +5,7 @@
*/ */
package org.elasticsearch.xpack.security.authc.esnative; package org.elasticsearch.xpack.security.authc.esnative;
import joptsimple.OptionException;
import joptsimple.OptionParser; import joptsimple.OptionParser;
import joptsimple.OptionSet; import joptsimple.OptionSet;
import org.elasticsearch.cli.MockTerminal; import org.elasticsearch.cli.MockTerminal;
@ -24,6 +25,7 @@ import java.util.Arrays;
import java.util.HashSet; import java.util.HashSet;
import java.util.Set; import java.util.Set;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.is;
/** /**
@ -155,4 +157,13 @@ public class ESNativeMigrateToolTests extends NativeRealmIntegTestCase {
assertThat("expected list to contain: " + r, roles.contains(r), is(true)); assertThat("expected list to contain: " + r, roles.contains(r), is(true));
} }
} }
public void testMissingPasswordParameter() {
ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles();
final OptionException ex = expectThrows(OptionException.class,
() -> muor.getParser().parse("-u", "elastic", "-U", "http://localhost:9200"));
assertThat(ex.getMessage(), containsString("password"));
}
} }

View File

@ -13,7 +13,6 @@ import org.apache.kerby.kerberos.kerb.KrbException;
import org.apache.kerby.kerberos.kerb.client.KrbConfig; import org.apache.kerby.kerberos.kerb.client.KrbConfig;
import org.apache.kerby.kerberos.kerb.server.KdcConfigKey; import org.apache.kerby.kerberos.kerb.server.KdcConfigKey;
import org.apache.kerby.kerberos.kerb.server.SimpleKdcServer; import org.apache.kerby.kerberos.kerb.server.SimpleKdcServer;
import org.apache.kerby.util.NetworkUtil;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
@ -22,6 +21,9 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import java.io.IOException; import java.io.IOException;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.nio.charset.StandardCharsets; import java.nio.charset.StandardCharsets;
import java.nio.file.Files; import java.nio.file.Files;
import java.nio.file.Path; import java.nio.file.Path;
@ -31,6 +33,8 @@ import java.security.PrivilegedExceptionAction;
import java.util.Locale; import java.util.Locale;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import javax.net.ServerSocketFactory;
/** /**
* Utility wrapper around Apache {@link SimpleKdcServer} backed by Unboundid * Utility wrapper around Apache {@link SimpleKdcServer} backed by Unboundid
* {@link InMemoryDirectoryServer}.<br> * {@link InMemoryDirectoryServer}.<br>
@ -127,14 +131,14 @@ public class SimpleKdcLdapServer {
simpleKdc.setWorkDir(workDir.toFile()); simpleKdc.setWorkDir(workDir.toFile());
simpleKdc.setKdcHost(host); simpleKdc.setKdcHost(host);
simpleKdc.setKdcRealm(realm); simpleKdc.setKdcRealm(realm);
if (kdcPort == 0) {
kdcPort = NetworkUtil.getServerPort();
}
if (transport != null) { if (transport != null) {
if (transport.trim().equals("TCP")) { if (kdcPort == 0) {
kdcPort = getServerPort(transport);
}
if (transport.trim().equalsIgnoreCase("TCP")) {
simpleKdc.setKdcTcpPort(kdcPort); simpleKdc.setKdcTcpPort(kdcPort);
simpleKdc.setAllowUdp(false); simpleKdc.setAllowUdp(false);
} else if (transport.trim().equals("UDP")) { } else if (transport.trim().equalsIgnoreCase("UDP")) {
simpleKdc.setKdcUdpPort(kdcPort); simpleKdc.setKdcUdpPort(kdcPort);
simpleKdc.setAllowTcp(false); simpleKdc.setAllowTcp(false);
} else { } else {
@ -221,4 +225,21 @@ public class SimpleKdcLdapServer {
logger.info("SimpleKdcServer stoppped."); logger.info("SimpleKdcServer stoppped.");
} }
private static int getServerPort(String transport) {
if (transport != null && transport.trim().equalsIgnoreCase("TCP")) {
try (ServerSocket serverSocket = ServerSocketFactory.getDefault().createServerSocket(0, 1,
InetAddress.getByName("127.0.0.1"))) {
return serverSocket.getLocalPort();
} catch (Exception ex) {
throw new RuntimeException("Failed to get a TCP server socket point");
}
} else if (transport != null && transport.trim().equalsIgnoreCase("UDP")) {
try (DatagramSocket socket = new DatagramSocket(0, InetAddress.getByName("127.0.0.1"))) {
return socket.getLocalPort();
} catch (Exception ex) {
throw new RuntimeException("Failed to get a UDP server socket point");
}
}
throw new IllegalArgumentException("Invalid transport: " + transport);
}
} }