diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 5b02ec6b35c..8329712c91f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -58,10 +58,10 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.client.core.CountResponse; -import org.elasticsearch.client.core.TermVectorsResponse; -import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.client.core.MultiTermVectorsRequest; import org.elasticsearch.client.core.MultiTermVectorsResponse; +import org.elasticsearch.client.core.TermVectorsRequest; +import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.client.tasks.TaskSubmissionResponse; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; @@ -139,6 +139,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ParsedAvg; import org.elasticsearch.search.aggregations.metrics.ParsedCardinality; @@ -148,6 +149,7 @@ import org.elasticsearch.search.aggregations.metrics.ParsedGeoCentroid; import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentileRanks; import org.elasticsearch.search.aggregations.metrics.ParsedHDRPercentiles; import org.elasticsearch.search.aggregations.metrics.ParsedMax; +import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation; import org.elasticsearch.search.aggregations.metrics.ParsedMin; import org.elasticsearch.search.aggregations.metrics.ParsedScriptedMetric; import org.elasticsearch.search.aggregations.metrics.ParsedStats; @@ -161,20 +163,18 @@ import org.elasticsearch.search.aggregations.metrics.StatsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ParsedMedianAbsoluteDeviation; -import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; -import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue; -import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; -import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; -import 
org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; import org.elasticsearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; +import org.elasticsearch.search.aggregations.pipeline.ParsedBucketMetricValue; import org.elasticsearch.search.aggregations.pipeline.ParsedDerivative; +import org.elasticsearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.ParsedPercentilesBucket; +import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; +import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; +import org.elasticsearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; @@ -203,13 +203,33 @@ import static java.util.Collections.singleton; import static java.util.stream.Collectors.toList; /** - * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. - * The {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically - * when closing the {@link RestHighLevelClient} instance that wraps it. + * High level REST client that wraps an instance of the low level {@link RestClient} and allows to build requests and read responses. The + * {@link RestClient} instance is internally built based on the provided {@link RestClientBuilder} and it gets closed automatically when + * closing the {@link RestHighLevelClient} instance that wraps it. + *

+ * * In case an already existing instance of a low-level REST client needs to be provided, this class can be subclassed and the - * {@link #RestHighLevelClient(RestClient, CheckedConsumer, List)} constructor can be used. - * This class can also be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through - * plugins, or to add support for custom response sections, again added to Elasticsearch through plugins. + * {@link #RestHighLevelClient(RestClient, CheckedConsumer, List)} constructor can be used. + *

+ * + * This class can also be sub-classed to expose additional client methods that make use of endpoints added to Elasticsearch through plugins, + * or to add support for custom response sections, again added to Elasticsearch through plugins. + *

+ * + * The majority of the methods in this class come in two flavors, a blocking and an asynchronous version (e.g. + * {@link #search(SearchRequest, RequestOptions)} and {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)}), where the latter + * takes an implementation of an {@link ActionListener} as an argument that needs to implement methods that handle successful responses and + * failure scenarios. Most of the blocking calls can throw an {@link IOException} or an unchecked {@link ElasticsearchException} in the + * following cases: + * + * <ul> + * <li>an {@link IOException} is usually thrown in case of failing to parse the REST response in the high-level REST client, when the + * request times out, or in similar cases where there is no response coming back from the server</li> + * <li>an {@link ElasticsearchException} is usually thrown when the server returns a 4xx or 5xx error code; the high-level client then + * tries to parse the response body error details, throws a generic {@link ElasticsearchException}, and adds the original + * {@link ResponseException} as a suppressed exception to it</li> + * </ul>

+ * */ public class RestHighLevelClient implements Closeable { @@ -448,7 +468,6 @@ public class RestHighLevelClient implements Closeable { * @param bulkRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final BulkResponse bulk(BulkRequest bulkRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet()); @@ -471,7 +490,6 @@ public class RestHighLevelClient implements Closeable { * @param reindexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final BulkByScrollResponse reindex(ReindexRequest reindexRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( @@ -485,7 +503,6 @@ public class RestHighLevelClient implements Closeable { * @param reindexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the submission response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final TaskSubmissionResponse submitReindexTask(ReindexRequest reindexRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( @@ -513,7 +530,6 @@ public class RestHighLevelClient implements Closeable { * @param updateByQueryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( @@ -543,7 +559,6 @@ public class RestHighLevelClient implements Closeable { * @param deleteByQueryRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final BulkByScrollResponse deleteByQuery(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( @@ -573,7 +588,6 @@ public class RestHighLevelClient implements Closeable { * @param rethrottleRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final ListTasksResponse deleteByQueryRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleDeleteByQuery, options, @@ -601,7 +615,6 @@ public class RestHighLevelClient implements Closeable { * @param rethrottleRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final ListTasksResponse updateByQueryRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleUpdateByQuery, options, @@ -630,7 +643,6 @@ public class RestHighLevelClient implements Closeable { * @param rethrottleRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final ListTasksResponse reindexRethrottle(RethrottleRequest rethrottleRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(rethrottleRequest, RequestConverters::rethrottleReindex, options, @@ -656,7 +668,6 @@ public class RestHighLevelClient implements Closeable { * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return true if the ping succeeded, false otherwise - * @throws IOException in case there is a problem sending the request */ public final boolean ping(RequestOptions options) throws IOException { return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), options, RestHighLevelClient::convertExistsResponse, @@ -667,7 +678,6 @@ public class RestHighLevelClient implements Closeable { * Get the cluster info otherwise provided when sending an HTTP request to '/' * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final MainResponse info(RequestOptions options) throws IOException { return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(), options, @@ -680,7 +690,6 @@ public class RestHighLevelClient implements Closeable { * @param getRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final GetResponse get(GetRequest getRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, singleton(404)); @@ -704,7 +713,6 @@ public class RestHighLevelClient implements Closeable { * @param multiGetRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response * @deprecated use {@link #mget(MultiGetRequest, RequestOptions)} instead */ @Deprecated @@ -719,7 +727,6 @@ public class RestHighLevelClient implements Closeable { * @param multiGetRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final MultiGetResponse mget(MultiGetRequest multiGetRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, options, MultiGetResponse::fromXContent, @@ -757,7 +764,6 @@ public class RestHighLevelClient implements Closeable { * @param getRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return true if the document exists, false otherwise - * @throws IOException in case there is a problem sending the request */ public final boolean exists(GetRequest getRequest, RequestOptions options) throws IOException { return performRequest(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, emptySet()); @@ -777,20 +783,19 @@ public class RestHighLevelClient implements Closeable { /** * Checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise. - * See Source exists API + * See Source exists API * on elastic.co * @param getRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return true if the document and _source field exists, false otherwise - * @throws IOException in case there is a problem sending the request */ public boolean existsSource(GetRequest getRequest, RequestOptions options) throws IOException { return performRequest(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, emptySet()); - } - + } + /** * Asynchronously checks for the existence of a document with a "_source" field. Returns true if it exists, false otherwise. - * See Source exists API + * See Source exists API * on elastic.co * @param getRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -799,15 +804,14 @@ public class RestHighLevelClient implements Closeable { public final void existsSourceAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { performRequestAsync(getRequest, RequestConverters::sourceExists, options, RestHighLevelClient::convertExistsResponse, listener, emptySet()); - } - + } + /** * Index a document using the Index API. * See Index API on elastic.co * @param indexRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final IndexResponse index(IndexRequest indexRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); @@ -831,7 +835,6 @@ public class RestHighLevelClient implements Closeable { * @param countRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final CountResponse count(CountRequest countRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(countRequest, RequestConverters::count, options, CountResponse::fromXContent, @@ -856,7 +859,6 @@ public class RestHighLevelClient implements Closeable { * @param updateRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final UpdateResponse update(UpdateRequest updateRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, emptySet()); @@ -877,10 +879,9 @@ public class RestHighLevelClient implements Closeable { /** * Deletes a document by id using the Delete API. * See Delete API on elastic.co - * @param deleteRequest the reuqest + * @param deleteRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final DeleteResponse delete(DeleteRequest deleteRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, options, DeleteResponse::fromXContent, @@ -905,7 +906,6 @@ public class RestHighLevelClient implements Closeable { * @param searchRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final SearchResponse search(SearchRequest searchRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(searchRequest, RequestConverters::search, options, SearchResponse::fromXContent, emptySet()); @@ -930,7 +930,6 @@ public class RestHighLevelClient implements Closeable { * @param multiSearchRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response * @deprecated use {@link #msearch(MultiSearchRequest, RequestOptions)} instead */ @Deprecated @@ -945,7 +944,6 @@ public class RestHighLevelClient implements Closeable { * @param multiSearchRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final MultiSearchResponse msearch(MultiSearchRequest multiSearchRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, options, MultiSearchResponse::fromXContext, @@ -988,7 +986,6 @@ public class RestHighLevelClient implements Closeable { * @param searchScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response * @deprecated use {@link #scroll(SearchScrollRequest, RequestOptions)} instead */ @Deprecated @@ -1003,7 +1000,6 @@ public class RestHighLevelClient implements Closeable { * @param searchScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final SearchResponse scroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, options, SearchResponse::fromXContent, @@ -1046,7 +1042,6 @@ public class RestHighLevelClient implements Closeable { * @param clearScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, options, ClearScrollResponse::fromXContent, @@ -1057,7 +1052,7 @@ public class RestHighLevelClient implements Closeable { * Asynchronously clears one or more scroll ids using the Clear Scroll API. * See * Clear Scroll API on elastic.co - * @param clearScrollRequest the reuqest + * @param clearScrollRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion */ @@ -1074,7 +1069,6 @@ public class RestHighLevelClient implements Closeable { * @param searchTemplateRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final SearchTemplateResponse searchTemplate(SearchTemplateRequest searchTemplateRequest, RequestOptions options) throws IOException { @@ -1100,7 +1094,6 @@ public class RestHighLevelClient implements Closeable { * @param explainRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final ExplainResponse explain(ExplainRequest explainRequest, RequestOptions options) throws IOException { return performRequest(explainRequest, RequestConverters::explain, options, @@ -1198,7 +1191,6 @@ public class RestHighLevelClient implements Closeable { * @param rankEvalRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, options, RankEvalResponse::fromXContent, @@ -1251,7 +1243,6 @@ public class RestHighLevelClient implements Closeable { * @param fieldCapabilitiesRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest, RequestOptions options) throws IOException { @@ -1266,7 +1257,6 @@ public class RestHighLevelClient implements Closeable { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public GetStoredScriptResponse getScript(GetStoredScriptRequest request, RequestOptions options) throws IOException { return performRequestAndParseEntity(request, RequestConverters::getScript, options, @@ -1294,7 +1284,6 @@ public class RestHighLevelClient implements Closeable { * @param request the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse deleteScript(DeleteStoredScriptRequest request, RequestOptions options) throws IOException { return performRequestAndParseEntity(request, RequestConverters::deleteScript, options, @@ -1322,7 +1311,6 @@ public class RestHighLevelClient implements Closeable { * @param putStoredScriptRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response */ public AcknowledgedResponse putScript(PutStoredScriptRequest putStoredScriptRequest, RequestOptions options) throws IOException { @@ -1450,9 +1438,9 @@ public class RestHighLevelClient implements Closeable { throw new IOException("Unable to parse response body for " + response, e); } } - + /** - * Defines a helper method for requests that can 404 and in which case will return an empty Optional + * Defines a helper method for requests that can 404 and in which case will return an empty Optional * otherwise tries to parse the response body */ protected final Optional performRequestAndParseOptionalEntity(Req request, @@ -1481,7 +1469,7 @@ public class RestHighLevelClient implements Closeable { } catch (Exception e) { throw new IOException("Unable to parse response body for " + response, e); } - } + } /** * @deprecated If creating a new HLRC ReST API call, consider creating new actions instead of reusing server actions. The Validation @@ -1603,9 +1591,9 @@ public class RestHighLevelClient implements Closeable { } }; } - + /** - * Async request which returns empty Optionals in the case of 404s or parses entity into an Optional + * Asynchronous request which returns empty {@link Optional}s in the case of 404s or parses entity into an Optional */ protected final void performRequestAsyncAndParseOptionalEntity(Req request, CheckedFunction requestConverter, @@ -1625,11 +1613,11 @@ public class RestHighLevelClient implements Closeable { return; } req.setOptions(options); - ResponseListener responseListener = wrapResponseListener404sOptional(response -> parseEntity(response.getEntity(), + ResponseListener responseListener = wrapResponseListener404sOptional(response -> parseEntity(response.getEntity(), entityParser), listener); - client.performRequestAsync(req, responseListener); - } - + client.performRequestAsync(req, responseListener); + } + final ResponseListener wrapResponseListener404sOptional(CheckedFunction responseConverter, ActionListener> actionListener) { return new ResponseListener() { @@ -1658,7 +1646,7 @@ public class RestHighLevelClient implements Closeable { } } }; - } + } /** * Converts a {@link ResponseException} obtained from the low level REST client into an {@link ElasticsearchException}. 
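The class-level Javadoc updated above describes the two flavors that most `RestHighLevelClient` methods come in: a blocking variant and an asynchronous variant that takes an `ActionListener`. The following stand-alone sketch illustrates that pattern; it is not part of this change, and the `localhost:9200` host and `posts` index name are hypothetical.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

import java.util.concurrent.CountDownLatch;

public class SearchFlavorsExample {
    public static void main(String[] args) throws Exception {
        // The wrapped low-level RestClient is built from the given builder and closed together with this client.
        RestHighLevelClient client = new RestHighLevelClient(
            RestClient.builder(new HttpHost("localhost", 9200, "http")));

        SearchRequest request = new SearchRequest("posts"); // hypothetical index name

        // Blocking flavor: throws IOException when no parsable response comes back,
        // or an unchecked ElasticsearchException when the server answers with a 4xx/5xx error code.
        SearchResponse blockingResponse = client.search(request, RequestOptions.DEFAULT);
        System.out.println("took " + blockingResponse.getTook());

        // Asynchronous flavor: returns immediately, the outcome is delivered to the ActionListener.
        CountDownLatch latch = new CountDownLatch(1);
        client.searchAsync(request, RequestOptions.DEFAULT, new ActionListener<SearchResponse>() {
            @Override
            public void onResponse(SearchResponse response) {
                // called when the execution successfully completes
                latch.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                // called when the whole request fails
                latch.countDown();
            }
        });

        latch.await(); // keep the client open until the listener has been notified
        client.close();
    }
}
--------------------------------------------------

Failure handling is the same in both flavors: the listener's `onFailure` receives the same exceptions the blocking variant would throw, as the execution docs updated later in this change point out.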
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponse.java index f4afb2d650e..ce42c98e57c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponse.java @@ -19,41 +19,59 @@ package org.elasticsearch.client.ccr; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; -import java.io.IOException; +import java.util.AbstractMap; import java.util.Collections; -import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.NavigableMap; import java.util.Objects; +import java.util.TreeMap; +import java.util.stream.Collectors; public final class GetAutoFollowPatternResponse { - public static GetAutoFollowPatternResponse fromXContent(final XContentParser parser) throws IOException { - final Map patterns = new HashMap<>(); - for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { - if (token == Token.FIELD_NAME) { - final String name = parser.currentName(); - final Pattern pattern = Pattern.PARSER.parse(parser, null); - patterns.put(name, pattern); - } - } - return new GetAutoFollowPatternResponse(patterns); + static final ParseField PATTERNS_FIELD = new ParseField("patterns"); + static final ParseField NAME_FIELD = new ParseField("name"); + static final ParseField PATTERN_FIELD = new ParseField("pattern"); + + private static final ConstructingObjectParser, Void> ENTRY_PARSER = new ConstructingObjectParser<>( + "get_auto_follow_pattern_response", args -> new AbstractMap.SimpleEntry<>((String) args[0], (Pattern) args[1])); + + static { + ENTRY_PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD); + ENTRY_PARSER.declareObject(ConstructingObjectParser.constructorArg(), Pattern.PARSER, PATTERN_FIELD); } - private final Map patterns; + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_auto_follow_pattern_response", args -> { + @SuppressWarnings("unchecked") + List> entries = (List>) args[0]; + return new GetAutoFollowPatternResponse(new TreeMap<>(entries.stream() + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)))); + }); - GetAutoFollowPatternResponse(Map patterns) { - this.patterns = Collections.unmodifiableMap(patterns); + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), ENTRY_PARSER, PATTERNS_FIELD); } - public Map getPatterns() { + public static GetAutoFollowPatternResponse fromXContent(final XContentParser parser) { + return PARSER.apply(parser, null); + } + + private final NavigableMap patterns; + + GetAutoFollowPatternResponse(NavigableMap patterns) { + this.patterns = Collections.unmodifiableNavigableMap(patterns); + } + + public NavigableMap getPatterns() { return patterns; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java index 64eb9ba4f9f..b4a37286b4a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/GetAutoFollowPatternResponseTests.java @@ -27,8 +27,9 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.Collections; -import java.util.HashMap; import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.FOLLOW_PATTERN_FIELD; import static org.elasticsearch.client.ccr.PutAutoFollowPatternRequest.LEADER_PATTERNS_FIELD; @@ -48,7 +49,7 @@ public class GetAutoFollowPatternResponseTests extends ESTestCase { private GetAutoFollowPatternResponse createTestInstance() { int numPatterns = randomIntBetween(0, 16); - Map patterns = new HashMap<>(numPatterns); + NavigableMap patterns = new TreeMap<>(); for (int i = 0; i < numPatterns; i++) { GetAutoFollowPatternResponse.Pattern pattern = new GetAutoFollowPatternResponse.Pattern( randomAlphaOfLength(4), Collections.singletonList(randomAlphaOfLength(4)), randomAlphaOfLength(4)); @@ -90,17 +91,26 @@ public class GetAutoFollowPatternResponseTests extends ESTestCase { public static void toXContent(GetAutoFollowPatternResponse response, XContentBuilder builder) throws IOException { builder.startObject(); { + builder.startArray(GetAutoFollowPatternResponse.PATTERNS_FIELD.getPreferredName()); for (Map.Entry entry : response.getPatterns().entrySet()) { - builder.startObject(entry.getKey()); - GetAutoFollowPatternResponse.Pattern pattern = entry.getValue(); - builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster()); - builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns()); - if (pattern.getFollowIndexNamePattern()!= null) { - builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern()); + builder.startObject(); + { + builder.field(GetAutoFollowPatternResponse.NAME_FIELD.getPreferredName(), entry.getKey()); + builder.startObject(GetAutoFollowPatternResponse.PATTERN_FIELD.getPreferredName()); + { + GetAutoFollowPatternResponse.Pattern pattern = entry.getValue(); + builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), pattern.getRemoteCluster()); + builder.field(LEADER_PATTERNS_FIELD.getPreferredName(), pattern.getLeaderIndexPatterns()); + if (pattern.getFollowIndexNamePattern()!= null) { + builder.field(FOLLOW_PATTERN_FIELD.getPreferredName(), pattern.getFollowIndexNamePattern()); + } + entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); + } + builder.endObject(); } - entry.getValue().toXContentFragment(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); } + builder.endArray(); } builder.endObject(); } diff --git a/docs/java-rest/high-level/execution.asciidoc b/docs/java-rest/high-level/execution.asciidoc index 4dfb11e196d..1028d9b6975 100644 --- a/docs/java-rest/high-level/execution.asciidoc +++ b/docs/java-rest/high-level/execution.asciidoc @@ -18,6 +18,15 @@ for the +{response}+ to be returned before continuing with code execution: include-tagged::{doc-tests-file}[{api}-execute] -------------------------------------------------- +Synchronous calls may throw an `IOException` in case of either failing to +parse the REST response in the high-level REST client, the request times out +or similar cases 
where there is no response coming back from the server. + +In cases where the server returns a `4xx` or `5xx` error code, the high-level +client tries to parse the response body error details instead and then throws +a generic `ElasticsearchException` and adds the original `ResponseException` as a +suppressed exception to it. + [id="{upid}-{api}-async"] ==== Asynchronous Execution @@ -36,7 +45,8 @@ the execution completes The asynchronous method does not block and returns immediately. Once it is completed the `ActionListener` is called back using the `onResponse` method if the execution successfully completed or using the `onFailure` method if -it failed. +it failed. Failure scenarios and expected exceptions are the same as in the +synchronous execution case. A typical listener for +{api}+ looks like: @@ -45,4 +55,4 @@ A typical listener for +{api}+ looks like: include-tagged::{doc-tests-file}[{api}-execute-listener] -------------------------------------------------- <1> Called when the execution is successfully completed. -<2> Called when the whole +{request}+ fails. \ No newline at end of file +<2> Called when the whole +{request}+ fails. diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index b154f6b907e..19eb2b928ae 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -87,15 +87,19 @@ The API returns the following result: [source,js] -------------------------------------------------- { - "my_auto_follow_pattern" : - { - "remote_cluster" : "remote_cluster", - "leader_index_patterns" : - [ - "leader_index*" - ], - "follow_index_pattern" : "{{leader_index}}-follower" - } + "patterns": [ + { + "name": "my_auto_follow_pattern", + "pattern": { + "remote_cluster" : "remote_cluster", + "leader_index_patterns" : + [ + "leader_index*" + ], + "follow_index_pattern" : "{{leader_index}}-follower" + } + } + ] } -------------------------------------------------- // TESTRESPONSE diff --git a/docs/reference/security/reference/files.asciidoc b/docs/reference/security/reference/files.asciidoc index 64a004c6646..306fdcdddc1 100644 --- a/docs/reference/security/reference/files.asciidoc +++ b/docs/reference/security/reference/files.asciidoc @@ -1,30 +1,31 @@ [role="xpack"] +[testenv="gold"] [[security-files]] -=== Security Files +=== Security files -{security} uses the following files: +The {es} {security-features} use the following files: -* `ES_PATH_CONF/roles.yml` defines the roles in use on the cluster - (read more <>). +* `ES_PATH_CONF/roles.yml` defines the roles in use on the cluster. See +{stack-ov}/defining-roles.html[Defining roles]. * `ES_PATH_CONF/elasticsearch-users` defines the users and their hashed passwords for - the <>. + the `file` realm. See <>. * `ES_PATH_CONF/elasticsearch-users_roles` defines the user roles assignment for the - the <>. + the `file` realm. See <>. * `ES_PATH_CONF/role_mapping.yml` defines the role assignments for a Distinguished Name (DN) to a role. This allows for LDAP and Active Directory - groups and users and PKI users to be mapped to roles (read more - <>). + groups and users and PKI users to be mapped to roles. See + {stack-ov}/mapping-roles.html[Mapping users and groups to roles]. -* `ES_PATH_CONF/log4j2.properties` contains audit information (read more - <>). +* `ES_PATH_CONF/log4j2.properties` contains audit information. 
See +{stack-ov}/audit-log-output.html[Logfile audit output]. [[security-files-location]] -IMPORTANT: Any files that {security} uses must be stored in the Elasticsearch - configuration directory. Elasticsearch runs with restricted permissions +IMPORTANT: Any files that the {security-features} use must be stored in the {es} + configuration directory. {es} runs with restricted permissions and is only permitted to read from the locations configured in the directory layout for enhanced security. diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 9f4f98baf66..e97ad2edc77 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -5,8 +5,9 @@ Security settings ++++ -By default, {security} is disabled when you have a basic or trial license. To -enable {security}, use the `xpack.security.enabled` setting. +By default, the {es} {security-features} are disabled when you have a basic or +trial license. To enable {security-features}, use the `xpack.security.enabled` +setting. You configure `xpack.security` settings to <> @@ -25,13 +26,15 @@ For more information about creating and updating the {es} keystore, see [[general-security-settings]] ==== General security settings `xpack.security.enabled`:: -Set to `true` to enable {security} on the node. + +Set to `true` to enable {es} {security-features} on the node. + + -- If set to `false`, which is the default value for basic and trial licenses, -{security} is disabled. It also affects all {kib} instances that connect to this -{es} instance; you do not need to disable {security} in those `kibana.yml` files. -For more information about disabling {security} in specific {kib} instances, see {kibana-ref}/security-settings-kb.html[{kib} security settings]. +{security-features} are disabled. It also affects all {kib} instances that +connect to this {es} instance; you do not need to disable {security-features} in +those `kibana.yml` files. For more information about disabling {security-features} +in specific {kib} instances, see +{kibana-ref}/security-settings-kb.html[{kib} security settings]. TIP: If you have gold or higher licenses, the default value is `true`; we recommend that you explicitly add this setting to avoid confusion. @@ -67,7 +70,7 @@ See <>. Defaults to `bcrypt`. [[anonymous-access-settings]] ==== Anonymous access settings You can configure the following anonymous access settings in -`elasticsearch.yml`. For more information, see {xpack-ref}/anonymous-access.html[ +`elasticsearch.yml`. For more information, see {stack-ov}/anonymous-access.html[ Enabling anonymous access]. `xpack.security.authc.anonymous.username`:: @@ -117,7 +120,7 @@ Defaults to `48h` (48 hours). You can set the following document and field level security settings in `elasticsearch.yml`. For more information, see -{xpack-ref}/field-and-document-access-control.html[Setting up document and field +{stack-ov}/field-and-document-access-control.html[Setting up document and field level security]. `xpack.security.dls_fls.enabled`:: @@ -165,7 +168,7 @@ xpack.security.authc.realms: ---------------------------------------- The valid settings vary depending on the realm type. For more -information, see {xpack-ref}/setting-up-authentication.html[Setting up authentication]. +information, see {stack-ov}/setting-up-authentication.html[Setting up authentication]. [float] [[ref-realm-settings]] @@ -204,7 +207,7 @@ Defaults to `ssha256`. 
`authentication.enabled`:: If set to `false`, disables authentication support in this realm, so that it only supports user lookups. -(See the {xpack-ref}/run-as-privilege.html[run as] and +(See the {stack-ov}/run-as-privilege.html[run as] and {stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). Defaults to `true`. @@ -233,7 +236,7 @@ user credentials. See <>. Defaults to `ssha256`. `authentication.enabled`:: If set to `false`, disables authentication support in this realm, so that it only supports user lookups. -(See the {xpack-ref}/run-as-privilege.html[run as] and +(See the {stack-ov}/run-as-privilege.html[run as] and {stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). Defaults to `true`. @@ -282,7 +285,7 @@ The DN template that replaces the user name with the string `{0}`. This setting is multivalued; you can specify multiple user contexts. Required to operate in user template mode. If `user_search.base_dn` is specified, this setting is not valid. For more information on -the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms]. +the different modes, see {stack-ov}/ldap-realm.html[LDAP realms]. `authorization_realms`:: The names of the realms that should be consulted for delegated authorization. @@ -306,7 +309,7 @@ to `memberOf`. Specifies a container DN to search for users. Required to operated in user search mode. If `user_dn_templates` is specified, this setting is not valid. For more information on -the different modes, see {xpack-ref}/ldap-realm.html[LDAP realms]. +the different modes, see {stack-ov}/ldap-realm.html[LDAP realms]. `user_search.scope`:: The scope of the user search. Valid values are `sub_tree`, `one_level` or @@ -379,11 +382,11 @@ the filter. If not set, the user DN is passed into the filter. Defaults to Empt If set to `true`, the names of any unmapped LDAP groups are used as role names and assigned to the user. A group is considered to be _unmapped_ if it is not referenced in a -{xpack-ref}/mapping-roles.html#mapping-roles-file[role-mapping file]. API-based +{stack-ov}/mapping-roles.html#mapping-roles-file[role-mapping file]. API-based role mappings are not considered. Defaults to `false`. `files.role_mapping`:: -The {xpack-ref}/security-files.html[location] for the {xpack-ref}/mapping-roles.html#mapping-roles[ +The <> for the {stack-ov}/mapping-roles.html#mapping-roles[ YAML role mapping configuration file]. Defaults to `ES_PATH_CONF/role_mapping.yml`. @@ -501,7 +504,7 @@ in-memory cached user credentials. See <>. Defaults to `ssha256 `authentication.enabled`:: If set to `false`, disables authentication support in this realm, so that it only supports user lookups. -(See the {xpack-ref}/run-as-privilege.html[run as] and +(See the {stack-ov}/run-as-privilege.html[run as] and {stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). Defaults to `true`. @@ -557,7 +560,7 @@ is not referenced in any role-mapping files. API-based role mappings are not considered. Defaults to `false`. `files.role_mapping`:: -The {xpack-ref}/security-files.html[location] for the YAML +The <> for the YAML role mapping configuration file. Defaults to `ES_PATH_CONF/role_mapping.yml`. `user_search.base_dn`:: @@ -748,7 +751,7 @@ the in-memory cached user credentials. See <>. Defaults to `ssh `authentication.enabled`:: If set to `false`, disables authentication support in this realm, so that it only supports user lookups. 
-(See the {xpack-ref}/run-as-privilege.html[run as] and +(See the {stack-ov}/run-as-privilege.html[run as] and {stack-ov}/realm-chains.html#authorization_realms[authorization realms] features). Defaults to `true`. @@ -789,8 +792,8 @@ The path of a truststore to use. Defaults to the trusted certificates configured for SSL. This setting cannot be used with `certificate_authorities`. `files.role_mapping`:: -Specifies the {xpack-ref}/security-files.html[location] of the -{xpack-ref}/mapping-roles.html[YAML role mapping configuration file]. +Specifies the <> of the +{stack-ov}/mapping-roles.html[YAML role mapping configuration file]. Defaults to `ES_PATH_CONF/role_mapping.yml`. `authorization_realms`:: @@ -1207,7 +1210,7 @@ through the list of URLs will continue until a successful connection is made. ==== Default TLS/SSL settings You can configure the following TLS/SSL settings in `elasticsearch.yml`. For more information, see -{xpack-ref}/encrypting-communications.html[Encrypting communications]. These settings will be used +{stack-ov}/encrypting-communications.html[Encrypting communications]. These settings will be used for all of {xpack} unless they have been overridden by more specific settings such as those for HTTP or Transport. @@ -1447,7 +1450,7 @@ See also <>. [float] [[ip-filtering-settings]] ==== IP filtering settings -You can configure the following settings for {xpack-ref}/ip-filtering.html[IP filtering]. +You can configure the following settings for {stack-ov}/ip-filtering.html[IP filtering]. `xpack.security.transport.filter.allow`:: List of IP addresses to allow. diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index 008e7ac931a..5f869e2ac72 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -102,7 +102,9 @@ public class MultiTermVectorsRequest extends ActionRequest throw new IllegalArgumentException("docs array element should include an object"); } TermVectorsRequest termVectorsRequest = new TermVectorsRequest(template); - termVectorsRequest.type(MapperService.SINGLE_MAPPING_NAME); + if (termVectorsRequest.type() == null) { + termVectorsRequest.type(MapperService.SINGLE_MAPPING_NAME); + } TermVectorsRequest.parseRequest(termVectorsRequest, parser); add(termVectorsRequest); } diff --git a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java index a3af52a9a4a..5c150406098 100644 --- a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java @@ -42,4 +42,17 @@ public interface RepositoryPlugin { default Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { return Collections.emptyMap(); } + + /** + * Returns internal repository types added by this plugin. Internal repositories cannot be registered + * through the external API. + * + * @param env The environment for the local node, which may be used for the local settings and path.repo + * + * The key of the returned {@link Map} is the type name of the repository and + * the value is a factory to construct the {@link Repository} interface. 
+ */ + default Map getInternalRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { + return Collections.emptyMap(); + } } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java index 637ec2d8dfb..90e3c94dfb3 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java @@ -58,8 +58,24 @@ public class RepositoriesModule extends AbstractModule { } } + Map internalFactories = new HashMap<>(); + for (RepositoryPlugin repoPlugin : repoPlugins) { + Map newRepoTypes = repoPlugin.getInternalRepositories(env, namedXContentRegistry); + for (Map.Entry entry : newRepoTypes.entrySet()) { + if (internalFactories.put(entry.getKey(), entry.getValue()) != null) { + throw new IllegalArgumentException("Internal repository type [" + entry.getKey() + "] is already registered"); + } + if (factories.put(entry.getKey(), entry.getValue()) != null) { + throw new IllegalArgumentException("Internal repository type [" + entry.getKey() + "] is already registered as a " + + "non-internal repository"); + } + } + } + Map repositoryTypes = Collections.unmodifiableMap(factories); - repositoriesService = new RepositoriesService(env.settings(), clusterService, transportService, repositoryTypes, threadPool); + Map internalRepositoryTypes = Collections.unmodifiableMap(internalFactories); + repositoriesService = new RepositoriesService(env.settings(), clusterService, transportService, repositoryTypes, + internalRepositoryTypes, threadPool); } @Override diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index fe80592d009..7ab6ae0a1f4 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -36,6 +36,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.snapshots.RestoreService; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; @@ -57,6 +58,7 @@ public class RepositoriesService implements ClusterStateApplier { private static final Logger logger = LogManager.getLogger(RepositoriesService.class); private final Map typesRegistry; + private final Map internalTypesRegistry; private final ClusterService clusterService; @@ -64,12 +66,14 @@ public class RepositoriesService implements ClusterStateApplier { private final VerifyNodeRepositoryAction verifyAction; + private final Map internalRepositories = ConcurrentCollections.newConcurrentMap(); private volatile Map repositories = Collections.emptyMap(); public RepositoriesService(Settings settings, ClusterService clusterService, TransportService transportService, - Map typesRegistry, + Map typesRegistry, Map internalTypesRegistry, ThreadPool threadPool) { this.typesRegistry = typesRegistry; + this.internalTypesRegistry = internalTypesRegistry; this.clusterService = clusterService; this.threadPool = threadPool; // Doesn't make sense to maintain repositories on non-master and non-data nodes @@ -101,7 +105,7 @@ public 
class RepositoriesService implements ClusterStateApplier { // Trying to create the new repository on master to make sure it works try { - closeRepository(createRepository(newRepositoryMetaData)); + closeRepository(createRepository(newRepositoryMetaData, typesRegistry)); } catch (Exception e) { registrationListener.onFailure(e); return; @@ -315,7 +319,7 @@ public class RepositoriesService implements ClusterStateApplier { closeRepository(repository); repository = null; try { - repository = createRepository(repositoryMetaData); + repository = createRepository(repositoryMetaData, typesRegistry); } catch (RepositoryException ex) { // TODO: this catch is bogus, it means the old repo is already closed, // but we have nothing to replace it @@ -324,7 +328,7 @@ public class RepositoriesService implements ClusterStateApplier { } } else { try { - repository = createRepository(repositoryMetaData); + repository = createRepository(repositoryMetaData, typesRegistry); } catch (RepositoryException ex) { logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetaData.name()), ex); } @@ -355,9 +359,37 @@ public class RepositoriesService implements ClusterStateApplier { if (repository != null) { return repository; } + repository = internalRepositories.get(repositoryName); + if (repository != null) { + return repository; + } throw new RepositoryMissingException(repositoryName); } + public void registerInternalRepository(String name, String type) { + RepositoryMetaData metaData = new RepositoryMetaData(name, type, Settings.EMPTY); + Repository repository = internalRepositories.computeIfAbsent(name, (n) -> { + logger.debug("put internal repository [{}][{}]", name, type); + return createRepository(metaData, internalTypesRegistry); + }); + if (type.equals(repository.getMetadata().type()) == false) { + logger.warn(new ParameterizedMessage("internal repository [{}][{}] already registered. this prevented the registration of " + + "internal repository [{}][{}].", name, repository.getMetadata().type(), name, type)); + } else if (repositories.containsKey(name)) { + logger.warn(new ParameterizedMessage("non-internal repository [{}] already registered. this repository will block the " + + "usage of internal repository [{}][{}].", name, metaData.type(), name)); + } + } + + public void unregisterInternalRepository(String name) { + Repository repository = internalRepositories.remove(name); + if (repository != null) { + RepositoryMetaData metadata = repository.getMetadata(); + logger.debug(() -> new ParameterizedMessage("delete internal repository [{}][{}].", metadata.type(), name)); + closeRepository(repository); + } + } + /** Closes the given repository. */ private void closeRepository(Repository repository) { logger.debug("closing repository [{}][{}]", repository.getMetadata().type(), repository.getMetadata().name()); @@ -365,21 +397,21 @@ public class RepositoriesService implements ClusterStateApplier { } /** - * Creates repository holder + * Creates repository holder. 
This method starts the repository */ - private Repository createRepository(RepositoryMetaData repositoryMetaData) { + private Repository createRepository(RepositoryMetaData repositoryMetaData, Map factories) { logger.debug("creating repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()); - Repository.Factory factory = typesRegistry.get(repositoryMetaData.type()); + Repository.Factory factory = factories.get(repositoryMetaData.type()); if (factory == null) { throw new RepositoryException(repositoryMetaData.name(), "repository type [" + repositoryMetaData.type() + "] does not exist"); } try { - Repository repository = factory.create(repositoryMetaData, typesRegistry::get); + Repository repository = factory.create(repositoryMetaData, factories::get); repository.start(); return repository; } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); + logger.warn(new ParameterizedMessage("failed to create repository [{}][{}]", repositoryMetaData.type(), repositoryMetaData.name()), e); throw new RepositoryException(repositoryMetaData.name(), "failed to create repository", e); } } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 8da433fdc6c..2c36af8638f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -167,7 +167,7 @@ public abstract class RemoteClusterAware { REMOTE_CLUSTERS_SEEDS); protected final Settings settings; - protected final ClusterNameExpressionResolver clusterNameResolver; + private final ClusterNameExpressionResolver clusterNameResolver; /** * Creates a new {@link RemoteClusterAware} instance @@ -242,14 +242,15 @@ public abstract class RemoteClusterAware { * indices per cluster are collected as a list in the returned map keyed by the cluster alias. Local indices are grouped under * {@link #LOCAL_CLUSTER_GROUP_KEY}. The returned map is mutable. * + * @param remoteClusterNames the remote cluster names * @param requestIndices the indices in the search request to filter * @param indexExists a predicate that can test if a certain index or alias exists in the local cluster * * @return a map of grouped remote and local indices */ - public Map> groupClusterIndices(String[] requestIndices, Predicate indexExists) { + protected Map> groupClusterIndices(Set remoteClusterNames, String[] requestIndices, + Predicate indexExists) { Map> perClusterIndices = new HashMap<>(); - Set remoteClusterNames = getRemoteClusterNames(); for (String index : requestIndices) { int i = index.indexOf(RemoteClusterService.REMOTE_CLUSTER_INDEX_SEPARATOR); if (i >= 0) { @@ -281,9 +282,6 @@ public abstract class RemoteClusterAware { return perClusterIndices; } - protected abstract Set getRemoteClusterNames(); - - /** * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is * empty the cluster alias is unregistered and should be removed. 
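To make the grouping contract described in the `groupClusterIndices` Javadoc above concrete, here is a simplified stand-alone sketch of how index expressions are split between the local cluster and known remote cluster aliases. It mirrors the documented behavior (remote expressions use the `cluster_alias:index` form, local indices go under the local group key) but deliberately omits wildcard alias resolution and error handling, so it is an illustration rather than the actual implementation.

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class GroupClusterIndicesSketch {

    // Mirrors RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, which is the empty string.
    private static final String LOCAL_CLUSTER_GROUP_KEY = "";
    private static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';

    static Map<String, List<String>> groupClusterIndices(Set<String> remoteClusterNames, String[] requestIndices) {
        Map<String, List<String>> perClusterIndices = new HashMap<>();
        for (String index : requestIndices) {
            int i = index.indexOf(REMOTE_CLUSTER_INDEX_SEPARATOR);
            String cluster = LOCAL_CLUSTER_GROUP_KEY;
            String indexName = index;
            // Only treat the prefix as a cluster alias if it is a known remote cluster name.
            if (i >= 0 && remoteClusterNames.contains(index.substring(0, i))) {
                cluster = index.substring(0, i);
                indexName = index.substring(i + 1);
            }
            perClusterIndices.computeIfAbsent(cluster, k -> new ArrayList<>()).add(indexName);
        }
        return perClusterIndices;
    }

    public static void main(String[] args) {
        Set<String> remotes = new HashSet<>(Arrays.asList("cluster_one", "cluster_two")); // hypothetical aliases
        Map<String, List<String>> grouped =
            groupClusterIndices(remotes, new String[]{"logs-*", "cluster_one:logs-*", "cluster_two:metrics"});
        System.out.println(grouped); // e.g. {=[logs-*], cluster_one=[logs-*], cluster_two=[metrics]}
    }
}
--------------------------------------------------

The refactoring above passes the set of remote cluster names into the method explicitly, which is what allows `RemoteClusterService` (next file) to drop the abstract `getRemoteClusterNames()` override.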
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 0028a2537a2..52da474f2dd 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -278,7 +278,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl public Map groupIndices(IndicesOptions indicesOptions, String[] indices, Predicate indexExists) { Map originalIndicesMap = new HashMap<>(); if (isCrossClusterSearchEnabled()) { - final Map> groupedIndices = groupClusterIndices(indices, indexExists); + final Map> groupedIndices = groupClusterIndices(getRemoteClusterNames(), indices, indexExists); if (groupedIndices.isEmpty()) { //search on _all in the local cluster if neither local indices nor remote indices were specified originalIndicesMap.put(LOCAL_CLUSTER_GROUP_KEY, new OriginalIndices(Strings.EMPTY_ARRAY, indicesOptions)); @@ -380,8 +380,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl return connection; } - @Override - protected Set getRemoteClusterNames() { + Set getRemoteClusterNames() { return this.remoteClusters.keySet(); } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 4625aa04be3..89a6bc8dcf8 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -461,7 +461,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice Collections.emptySet()); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService(settings, clusterService, - transportService, null, threadPool); + transportService, Collections.emptyMap(), Collections.emptyMap(), threadPool); final PeerRecoveryTargetService recoveryTargetService = new PeerRecoveryTargetService(threadPool, transportService, null, clusterService); final ShardStateAction shardStateAction = mock(ShardStateAction.class); diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java new file mode 100644 index 00000000000..96a9670d162 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java @@ -0,0 +1,101 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RepositoriesModuleTests extends ESTestCase { + + private Environment environment; + private NamedXContentRegistry contentRegistry; + private List repoPlugins = new ArrayList<>(); + private RepositoryPlugin plugin1; + private RepositoryPlugin plugin2; + private Repository.Factory factory; + + @Override + public void setUp() throws Exception { + super.setUp(); + environment = mock(Environment.class); + contentRegistry = mock(NamedXContentRegistry.class); + plugin1 = mock(RepositoryPlugin.class); + plugin2 = mock(RepositoryPlugin.class); + factory = mock(Repository.Factory.class); + repoPlugins.add(plugin1); + repoPlugins.add(plugin2); + when(environment.settings()).thenReturn(Settings.EMPTY); + } + + public void testCanRegisterTwoRepositoriesWithDifferentTypes() { + when(plugin1.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory)); + when(plugin2.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type2", factory)); + + // Would throw + new RepositoriesModule(environment, repoPlugins, mock(TransportService.class), mock(ClusterService.class), + mock(ThreadPool.class), contentRegistry); + } + + public void testCannotRegisterTwoRepositoriesWithSameTypes() { + when(plugin1.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory)); + when(plugin2.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory)); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> new RepositoriesModule(environment, repoPlugins, mock(TransportService.class), mock(ClusterService.class), + mock(ThreadPool.class), contentRegistry)); + + assertEquals("Repository type [type1] is already registered", ex.getMessage()); + } + + public void testCannotRegisterTwoInternalRepositoriesWithSameTypes() { + when(plugin1.getInternalRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory)); + when(plugin2.getInternalRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory)); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> new RepositoriesModule(environment, repoPlugins, mock(TransportService.class), mock(ClusterService.class), + mock(ThreadPool.class), contentRegistry)); + + assertEquals("Internal repository type [type1] is already registered", ex.getMessage()); + } + + public void testCannotRegisterNormalAndInternalRepositoriesWithSameTypes() { + when(plugin1.getRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory)); + when(plugin2.getInternalRepositories(environment, contentRegistry)).thenReturn(Collections.singletonMap("type1", factory)); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, + () -> new 
RepositoriesModule(environment, repoPlugins, mock(TransportService.class), mock(ClusterService.class), + mock(ThreadPool.class), contentRegistry)); + + assertEquals("Internal repository type [type1] is already registered as a non-internal repository", ex.getMessage()); + } +} diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java new file mode 100644 index 00000000000..c02ab0d1856 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -0,0 +1,233 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories; + +import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import static org.mockito.Mockito.mock; + +public class RepositoriesServiceTests extends ESTestCase { + + private RepositoriesService repositoriesService; + + @Override + public void setUp() throws Exception { + super.setUp(); + ThreadPool threadPool = mock(ThreadPool.class); + final TransportService transportService = new TransportService(Settings.EMPTY, mock(Transport.class), threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> DiscoveryNode.createLocal(Settings.EMPTY, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), null, + Collections.emptySet()); + repositoriesService = new RepositoriesService(Settings.EMPTY, mock(ClusterService.class), + transportService, Collections.emptyMap(), Collections.singletonMap(TestRepository.TYPE, TestRepository::new), 
threadPool); + } + + public void testRegisterInternalRepository() { + String repoName = "name"; + expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName)); + repositoriesService.registerInternalRepository(repoName, TestRepository.TYPE); + Repository repository = repositoriesService.repository(repoName); + assertEquals(repoName, repository.getMetadata().name()); + assertEquals(TestRepository.TYPE, repository.getMetadata().type()); + assertEquals(Settings.EMPTY, repository.getMetadata().settings()); + assertTrue(((TestRepository) repository).isStarted); + } + + public void testUnregisterInternalRepository() { + String repoName = "name"; + expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName)); + repositoriesService.registerInternalRepository(repoName, TestRepository.TYPE); + Repository repository = repositoriesService.repository(repoName); + assertFalse(((TestRepository) repository).isClosed); + repositoriesService.unregisterInternalRepository(repoName); + expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName)); + assertTrue(((TestRepository) repository).isClosed); + } + + public void testRegisterWillNotUpdateIfInternalRepositoryWithNameExists() { + String repoName = "name"; + expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(repoName)); + repositoriesService.registerInternalRepository(repoName, TestRepository.TYPE); + Repository repository = repositoriesService.repository(repoName); + assertFalse(((TestRepository) repository).isClosed); + repositoriesService.registerInternalRepository(repoName, TestRepository.TYPE); + assertFalse(((TestRepository) repository).isClosed); + Repository repository2 = repositoriesService.repository(repoName); + assertSame(repository, repository2); + } + + private static class TestRepository implements Repository { + + private static final String TYPE = "internal"; + private boolean isClosed; + private boolean isStarted; + + private final RepositoryMetaData metaData; + + private TestRepository(RepositoryMetaData metaData) { + this.metaData = metaData; + } + + @Override + public RepositoryMetaData getMetadata() { + return metaData; + } + + @Override + public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { + return null; + } + + @Override + public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { + return null; + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException { + return null; + } + + @Override + public RepositoryData getRepositoryData() { + return null; + } + + @Override + public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData metaData) { + + } + + @Override + public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, + int totalShards, List shardFailures, long repositoryStateId, + boolean includeGlobalState) { + return null; + } + + @Override + public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { + + } + + @Override + public long getSnapshotThrottleTimeInNanos() { + return 0; + } + + @Override + public long getRestoreThrottleTimeInNanos() { + return 0; + } + + @Override + public String startVerification() { + return null; + } + + @Override + public void endVerification(String verificationToken) { + + } + + @Override + public void verify(String verificationToken, DiscoveryNode localNode) { + + } + + @Override + public boolean isReadOnly() 
{ + return false; + } + + @Override + public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus) { + + } + + @Override + public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState) { + + } + + @Override + public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { + return null; + } + + @Override + public Lifecycle.State lifecycleState() { + return null; + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + + } + + @Override + public void removeLifecycleListener(LifecycleListener listener) { + + } + + @Override + public void start() { + isStarted = true; + } + + @Override + public void stop() { + + } + + @Override + public void close() { + isClosed = true; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index 5bc05507b9d..9a185163436 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -219,8 +219,9 @@ public class RemoteClusterServiceTests extends ESTestCase { assertTrue(service.isRemoteClusterRegistered("cluster_1")); assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteClusterRegistered("foo")); - Map> perClusterIndices = service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", - "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", "cluster*:baz", "*:boo", "no*match:boo"}, + Map> perClusterIndices = service.groupClusterIndices(service.getRemoteClusterNames(), + new String[]{"foo:bar", "cluster_1:bar", "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", + "cluster*:baz", "*:boo", "no*match:boo"}, i -> false); List localIndices = perClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); assertNotNull(localIndices); @@ -230,7 +231,7 @@ public class RemoteClusterServiceTests extends ESTestCase { assertEquals(Arrays.asList("foo:bar", "foo*", "baz", "boo"), perClusterIndices.get("cluster_2")); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> - service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", + service.groupClusterIndices(service.getRemoteClusterNames(), new String[]{"foo:bar", "cluster_1:bar", "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, "cluster_1:bar"::equals)); assertEquals("Can not filter indices; index cluster_1:bar exists but there is also a remote cluster named:" + @@ -277,7 +278,7 @@ public class RemoteClusterServiceTests extends ESTestCase { } { IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> - service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", + service.groupClusterIndices(service.getRemoteClusterNames(), new String[]{"foo:bar", "cluster_1:bar", "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, "cluster_1:bar"::equals)); assertEquals("Can not filter indices; index cluster_1:bar exists but there is also a remote cluster named:" + " cluster_1", iae.getMessage()); diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index 
c79824287b4..1fe51f8ff00 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -157,5 +157,9 @@ include::authentication/configuring-kerberos-realm.asciidoc[] include::fips-140-compliance.asciidoc[] :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/settings/security-settings.asciidoc include::{es-repo-dir}/settings/security-settings.asciidoc[] + +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/reference/files.asciidoc +include::{es-repo-dir}/security/reference/files.asciidoc[] + :edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/settings/audit-settings.asciidoc include::{es-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/docs/en/security/reference.asciidoc b/x-pack/docs/en/security/reference.asciidoc deleted file mode 100644 index 75de1daee6d..00000000000 --- a/x-pack/docs/en/security/reference.asciidoc +++ /dev/null @@ -1,11 +0,0 @@ -[role="xpack"] -[[security-reference]] -== Reference -* <> -* {ref}/security-settings.html[Security Settings] -* <> -* {ref}/security-api.html[Security API] -* {ref}/xpack-commands.html[Security Commands] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/security/reference/files.asciidoc -include::{es-repo-dir}/security/reference/files.asciidoc[] diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml index 4d4026f46a4..ebf9176c30a 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml @@ -31,15 +31,17 @@ - do: ccr.get_auto_follow_pattern: name: my_pattern - - match: { my_pattern.remote_cluster: 'local' } - - match: { my_pattern.leader_index_patterns: ['logs-*'] } - - match: { my_pattern.max_outstanding_read_requests: 2 } + - match: { patterns.0.name: 'my_pattern' } + - match: { patterns.0.pattern.remote_cluster: 'local' } + - match: { patterns.0.pattern.leader_index_patterns: ['logs-*'] } + - match: { patterns.0.pattern.max_outstanding_read_requests: 2 } - do: ccr.get_auto_follow_pattern: {} - - match: { my_pattern.remote_cluster: 'local' } - - match: { my_pattern.leader_index_patterns: ['logs-*'] } - - match: { my_pattern.max_outstanding_read_requests: 2 } + - match: { patterns.0.name: 'my_pattern' } + - match: { patterns.0.pattern.remote_cluster: 'local' } + - match: { patterns.0.pattern.leader_index_patterns: ['logs-*'] } + - match: { patterns.0.pattern.max_outstanding_read_requests: 2 } - do: ccr.delete_auto_follow_pattern: diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index ec196d637e1..7cceecbd399 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -6,9 +6,11 @@ package org.elasticsearch.xpack.ccr; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; import 
org.elasticsearch.cluster.service.ClusterService; @@ -32,6 +34,8 @@ import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; @@ -41,46 +45,50 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.ccr.action.AutoFollowCoordinator; -import org.elasticsearch.xpack.ccr.action.TransportGetAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.TransportUnfollowAction; -import org.elasticsearch.xpack.ccr.rest.RestGetAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction; -import org.elasticsearch.xpack.ccr.rest.RestCcrStatsAction; -import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction; -import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; -import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; -import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; -import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.ShardChangesAction; import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor; -import org.elasticsearch.xpack.ccr.action.TransportFollowStatsAction; -import org.elasticsearch.xpack.ccr.action.TransportPutFollowAction; +import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction; import org.elasticsearch.xpack.ccr.action.TransportDeleteAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.action.TransportResumeFollowAction; -import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.action.TransportFollowStatsAction; +import org.elasticsearch.xpack.ccr.action.TransportGetAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.action.TransportPauseFollowAction; +import org.elasticsearch.xpack.ccr.action.TransportPutAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.action.TransportPutFollowAction; +import org.elasticsearch.xpack.ccr.action.TransportResumeFollowAction; +import org.elasticsearch.xpack.ccr.action.TransportUnfollowAction; import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction; +import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryAction; +import org.elasticsearch.xpack.ccr.action.repositories.PutInternalCcrRepositoryAction; import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; -import org.elasticsearch.xpack.ccr.rest.RestFollowStatsAction; -import org.elasticsearch.xpack.ccr.rest.RestPutFollowAction; +import org.elasticsearch.xpack.ccr.repository.CcrRepository; +import org.elasticsearch.xpack.ccr.rest.RestCcrStatsAction; import org.elasticsearch.xpack.ccr.rest.RestDeleteAutoFollowPatternAction; -import org.elasticsearch.xpack.ccr.rest.RestResumeFollowAction; -import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.rest.RestFollowStatsAction; +import 
org.elasticsearch.xpack.ccr.rest.RestGetAutoFollowPatternAction; import org.elasticsearch.xpack.ccr.rest.RestPauseFollowAction; +import org.elasticsearch.xpack.ccr.rest.RestPutAutoFollowPatternAction; +import org.elasticsearch.xpack.ccr.rest.RestPutFollowAction; +import org.elasticsearch.xpack.ccr.rest.RestResumeFollowAction; +import org.elasticsearch.xpack.ccr.rest.RestUnfollowAction; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.ccr.ShardFollowNodeTaskStatus; +import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.core.ccr.action.DeleteAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; +import org.elasticsearch.xpack.core.ccr.action.GetAutoFollowPatternAction; +import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; +import org.elasticsearch.xpack.core.ccr.action.PutAutoFollowPatternAction; import org.elasticsearch.xpack.core.ccr.action.PutFollowAction; import org.elasticsearch.xpack.core.ccr.action.ResumeFollowAction; -import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; import org.elasticsearch.xpack.core.ccr.action.UnfollowAction; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.function.Supplier; @@ -92,7 +100,7 @@ import static org.elasticsearch.xpack.core.XPackSettings.CCR_ENABLED_SETTING; /** * Container class for CCR functionality. */ -public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, EnginePlugin { +public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, EnginePlugin, RepositoryPlugin { public static final String CCR_THREAD_POOL_NAME = "ccr"; public static final String CCR_CUSTOM_METADATA_KEY = "ccr"; @@ -104,6 +112,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E private final boolean enabled; private final Settings settings; private final CcrLicenseChecker ccrLicenseChecker; + private final SetOnce repositoryManager = new SetOnce<>(); /** * Construct an instance of the CCR container with the specified settings. 
@@ -142,6 +151,8 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E return emptyList(); } + this.repositoryManager.set(new CcrRepositoryManager(settings, clusterService, (NodeClient) client)); + return Arrays.asList( ccrLicenseChecker, new AutoFollowCoordinator(settings, client, threadPool, clusterService, ccrLicenseChecker) @@ -166,6 +177,10 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E // internal actions new ActionHandler<>(BulkShardOperationsAction.INSTANCE, TransportBulkShardOperationsAction.class), new ActionHandler<>(ShardChangesAction.INSTANCE, ShardChangesAction.TransportAction.class), + new ActionHandler<>(PutInternalCcrRepositoryAction.INSTANCE, + PutInternalCcrRepositoryAction.TransportPutInternalRepositoryAction.class), + new ActionHandler<>(DeleteInternalCcrRepositoryAction.INSTANCE, + DeleteInternalCcrRepositoryAction.TransportDeleteInternalRepositoryAction.class), // stats action new ActionHandler<>(FollowStatsAction.INSTANCE, TransportFollowStatsAction.class), new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class), @@ -259,6 +274,12 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E return Collections.singletonList(new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, 32, 100, "xpack.ccr.ccr_thread_pool")); } + @Override + public Map getInternalRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { + Repository.Factory repositoryFactory = (metadata) -> new CcrRepository(metadata, settings); + return Collections.singletonMap(CcrRepository.TYPE, repositoryFactory); + } + protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java new file mode 100644 index 00000000000..f86789a880e --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrRepositoryManager.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryAction; +import org.elasticsearch.xpack.ccr.action.repositories.DeleteInternalCcrRepositoryRequest; +import org.elasticsearch.xpack.ccr.action.repositories.PutInternalCcrRepositoryAction; +import org.elasticsearch.xpack.ccr.action.repositories.PutInternalCcrRepositoryRequest; +import org.elasticsearch.xpack.ccr.repository.CcrRepository; + +import java.util.List; + +class CcrRepositoryManager extends RemoteClusterAware { + + private final NodeClient client; + + CcrRepositoryManager(Settings settings, ClusterService clusterService, NodeClient client) { + super(settings); + this.client = client; + listenForUpdates(clusterService.getClusterSettings()); + } + + @Override + protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { + String repositoryName = CcrRepository.NAME_PREFIX + clusterAlias; + if (addresses.isEmpty()) { + DeleteInternalCcrRepositoryRequest request = new DeleteInternalCcrRepositoryRequest(repositoryName); + PlainActionFuture f = PlainActionFuture.newFuture(); + client.executeLocally(DeleteInternalCcrRepositoryAction.INSTANCE, request, f); + assert f.isDone() : "Should be completed as it is executed synchronously"; + } else { + ActionRequest request = new PutInternalCcrRepositoryRequest(repositoryName, CcrRepository.TYPE); + PlainActionFuture f = PlainActionFuture.newFuture(); + client.executeLocally(PutInternalCcrRepositoryAction.INSTANCE, request, f); + assert f.isDone() : "Should be completed as it is executed synchronously"; + } + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java new file mode 100644 index 00000000000..e85ce65858e --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryAction.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action.repositories; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; + +public class DeleteInternalCcrRepositoryAction extends Action { + + public static final DeleteInternalCcrRepositoryAction INSTANCE = new DeleteInternalCcrRepositoryAction(); + public static final String NAME = "cluster:admin/ccr/internal_repository/delete"; + + private DeleteInternalCcrRepositoryAction() { + super(NAME); + } + + @Override + public DeleteInternalCcrRepositoryResponse newResponse() { + throw new UnsupportedOperationException(); + } + + @Override + public Writeable.Reader getResponseReader() { + return DeleteInternalCcrRepositoryResponse::new; + } + + public static class TransportDeleteInternalRepositoryAction + extends TransportAction { + + private final RepositoriesService repositoriesService; + + @Inject + public TransportDeleteInternalRepositoryAction(RepositoriesService repositoriesService, ActionFilters actionFilters, + TransportService transportService) { + super(NAME, actionFilters, transportService.getTaskManager()); + this.repositoriesService = repositoriesService; + } + + @Override + protected void doExecute(Task task, DeleteInternalCcrRepositoryRequest request, + ActionListener listener) { + repositoriesService.unregisterInternalRepository(request.getName()); + listener.onResponse(new DeleteInternalCcrRepositoryResponse()); + } + } + + public static class DeleteInternalCcrRepositoryResponse extends ActionResponse { + + DeleteInternalCcrRepositoryResponse() { + super(); + } + + DeleteInternalCcrRepositoryResponse(StreamInput streamInput) throws IOException { + super(streamInput); + } + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryRequest.java new file mode 100644 index 00000000000..12264c1d57c --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/DeleteInternalCcrRepositoryRequest.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action.repositories; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +public class DeleteInternalCcrRepositoryRequest extends ActionRequest { + + private final String name; + + public DeleteInternalCcrRepositoryRequest(String name) { + this.name = Objects.requireNonNull(name); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("DeleteInternalRepositoryRequest cannot be serialized for sending across the wire."); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException("DeleteInternalRepositoryRequest cannot be serialized for sending across the wire."); + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteInternalCcrRepositoryRequest that = (DeleteInternalCcrRepositoryRequest) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } + + @Override + public String toString() { + return "DeleteInternalRepositoryRequest{" + + "name='" + name + '\'' + + '}'; + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java new file mode 100644 index 00000000000..2d12cc4d77a --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryAction.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action.repositories; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; + +public class PutInternalCcrRepositoryAction extends Action { + + public static final PutInternalCcrRepositoryAction INSTANCE = new PutInternalCcrRepositoryAction(); + public static final String NAME = "cluster:admin/ccr/internal_repository/put"; + + private PutInternalCcrRepositoryAction() { + super(NAME); + } + + @Override + public PutInternalCcrRepositoryResponse newResponse() { + throw new UnsupportedOperationException(); + } + + @Override + public Writeable.Reader getResponseReader() { + return PutInternalCcrRepositoryResponse::new; + } + + public static class TransportPutInternalRepositoryAction + extends TransportAction { + + private final RepositoriesService repositoriesService; + + @Inject + public TransportPutInternalRepositoryAction(RepositoriesService repositoriesService, ActionFilters actionFilters, + TransportService transportService) { + super(NAME, actionFilters, transportService.getTaskManager()); + this.repositoriesService = repositoriesService; + } + + @Override + protected void doExecute(Task task, PutInternalCcrRepositoryRequest request, + ActionListener listener) { + repositoriesService.registerInternalRepository(request.getName(), request.getType()); + listener.onResponse(new PutInternalCcrRepositoryResponse()); + } + } + + public static class PutInternalCcrRepositoryResponse extends ActionResponse { + + PutInternalCcrRepositoryResponse() { + super(); + } + + PutInternalCcrRepositoryResponse(StreamInput streamInput) throws IOException { + super(streamInput); + } + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryRequest.java new file mode 100644 index 00000000000..71efcdf319d --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/repositories/PutInternalCcrRepositoryRequest.java @@ -0,0 +1,71 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action.repositories; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Objects; + +public class PutInternalCcrRepositoryRequest extends ActionRequest { + + private final String name; + private final String type; + + public PutInternalCcrRepositoryRequest(String name, String type) { + this.name = Objects.requireNonNull(name); + this.type = Objects.requireNonNull(type); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + throw new UnsupportedOperationException("PutInternalRepositoryRequest cannot be serialized for sending across the wire."); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + throw new UnsupportedOperationException("PutInternalRepositoryRequest cannot be serialized for sending across the wire."); + } + + public String getName() { + return name; + } + + public String getType() { + return type; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PutInternalCcrRepositoryRequest that = (PutInternalCcrRepositoryRequest) o; + return Objects.equals(name, that.name) && + Objects.equals(type, that.type); + } + + @Override + public int hashCode() { + return Objects.hash(name, type); + } + + @Override + public String toString() { + return "PutInternalCcrRepositoryRequest{" + + "name='" + name + '\'' + + ", type='" + type + '\'' + + '}'; + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java new file mode 100644 index 00000000000..81f9bd3f2f9 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -0,0 +1,149 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.repository; + +import org.apache.lucene.index.IndexCommit; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.indices.recovery.RecoveryState; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotShardFailure; + +import java.io.IOException; +import java.util.List; + +/** + * This repository relies on a remote cluster for Ccr restores. It is read-only so it can only be used to + * restore shards/indexes that exist on the remote cluster. + */ +public class CcrRepository extends AbstractLifecycleComponent implements Repository { + + public static final String TYPE = "_ccr_"; + public static final String NAME_PREFIX = "_ccr_"; + + private final RepositoryMetaData metadata; + + public CcrRepository(RepositoryMetaData metadata, Settings settings) { + super(settings); + this.metadata = metadata; + } + + @Override + protected void doStart() { + + } + + @Override + protected void doStop() { + + } + + @Override + protected void doClose() throws IOException { + + } + + @Override + public RepositoryMetaData getMetadata() { + return metadata; + } + + @Override + public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public RepositoryData getRepositoryData() { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public void initializeSnapshot(SnapshotId snapshotId, List indices, MetaData metaData) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List indices, long startTime, String failure, int totalShards, + List shardFailures, long repositoryStateId, boolean includeGlobalState) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public long getSnapshotThrottleTimeInNanos() { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public long getRestoreThrottleTimeInNanos() { + return 0; + } + 
+ @Override + public String startVerification() { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public void endVerification(String verificationToken) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public void verify(String verificationToken, DiscoveryNode localNode) { + } + + @Override + public boolean isReadOnly() { + return true; + } + + @Override + public void snapshotShard(IndexShard shard, Store store, SnapshotId snapshotId, IndexId indexId, IndexCommit snapshotIndexCommit, + IndexShardSnapshotStatus snapshotStatus) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public void restoreShard(IndexShard shard, SnapshotId snapshotId, Version version, IndexId indexId, ShardId snapshotShardId, + RecoveryState recoveryState) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } + + @Override + public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, Version version, IndexId indexId, ShardId shardId) { + throw new UnsupportedOperationException("Unsupported for repository of type: " + TYPE); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryManagerIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryManagerIT.java new file mode 100644 index 00000000000..133e1ee0606 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryManagerIT.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.CcrIntegTestCase; +import org.elasticsearch.xpack.ccr.repository.CcrRepository; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + +// TODO: Fold this integration test into a more expansive integration test as more bootstrap from remote work +// TODO: is completed. 
+public class CcrRepositoryManagerIT extends CcrIntegTestCase { + + public void testThatRepositoryIsPutAndRemovedWhenRemoteClusterIsUpdated() throws Exception { + String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; + final RepositoriesService repositoriesService = + getFollowerCluster().getDataOrMasterNodeInstances(RepositoriesService.class).iterator().next(); + try { + Repository repository = repositoriesService.repository(leaderClusterRepoName); + assertEquals(CcrRepository.TYPE, repository.getMetadata().type()); + assertEquals(leaderClusterRepoName, repository.getMetadata().name()); + } catch (RepositoryMissingException e) { + fail("need repository"); + } + + ClusterUpdateSettingsRequest putFollowerRequest = new ClusterUpdateSettingsRequest(); + String address = getFollowerCluster().getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); + putFollowerRequest.persistentSettings(Settings.builder().put("cluster.remote.follower_cluster_copy.seeds", address)); + assertAcked(followerClient().admin().cluster().updateSettings(putFollowerRequest).actionGet()); + + String followerCopyRepoName = CcrRepository.NAME_PREFIX + "follower_cluster_copy"; + try { + Repository repository = repositoriesService.repository(followerCopyRepoName); + assertEquals(CcrRepository.TYPE, repository.getMetadata().type()); + assertEquals(followerCopyRepoName, repository.getMetadata().name()); + } catch (RepositoryMissingException e) { + fail("need repository"); + } + + ClusterUpdateSettingsRequest deleteLeaderRequest = new ClusterUpdateSettingsRequest(); + deleteLeaderRequest.persistentSettings(Settings.builder().put("cluster.remote.leader_cluster.seeds", "")); + assertAcked(followerClient().admin().cluster().updateSettings(deleteLeaderRequest).actionGet()); + + expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(leaderClusterRepoName)); + + ClusterUpdateSettingsRequest deleteFollowerRequest = new ClusterUpdateSettingsRequest(); + deleteFollowerRequest.persistentSettings(Settings.builder().put("cluster.remote.follower_cluster_copy.seeds", "")); + assertAcked(followerClient().admin().cluster().updateSettings(deleteFollowerRequest).actionGet()); + + expectThrows(RepositoryMissingException.class, () -> repositoriesService.repository(followerCopyRepoName)); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java index 58a909a62ad..098ba6dba69 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java @@ -111,10 +111,21 @@ public class GetAutoFollowPatternAction extends Action entry : autoFollowPatterns.entrySet()) { - builder.startObject(entry.getKey()); - entry.getValue().toXContent(builder, params); - builder.endObject(); + { + builder.startArray("patterns"); + for (Map.Entry entry : autoFollowPatterns.entrySet()) { + builder.startObject(); + { + builder.field("name", entry.getKey()); + builder.startObject("pattern"); + { + entry.getValue().toXContent(builder, params); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endArray(); } builder.endObject(); return builder; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index 92427fb92c4..d0686bd03d9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -53,7 +53,9 @@ import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptContext; @@ -393,6 +395,21 @@ public class LocalStateCompositeXPackPlugin extends XPackPlugin implements Scrip .collect(toList()); } + @Override + public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { + HashMap repositories = new HashMap<>(super.getRepositories(env, namedXContentRegistry)); + filterPlugins(RepositoryPlugin.class).forEach(r -> repositories.putAll(r.getRepositories(env, namedXContentRegistry))); + return repositories; + } + + @Override + public Map getInternalRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { + HashMap internalRepositories = new HashMap<>(super.getInternalRepositories(env, namedXContentRegistry)); + filterPlugins(RepositoryPlugin.class).forEach(r -> + internalRepositories.putAll(r.getInternalRepositories(env, namedXContentRegistry))); + return internalRepositories; + } + @Override public void close() throws IOException { IOUtils.close(plugins); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java index 3f645eab78c..229c47c763c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeRealmMigrateTool.java @@ -104,10 +104,10 @@ public class ESNativeRealmMigrateTool extends LoggingAwareMultiCommand { super("Migrates users or roles from file to native realm"); this.username = parser.acceptsAll(Arrays.asList("u", "username"), "User used to authenticate with Elasticsearch") - .withRequiredArg(); + .withRequiredArg().required(); this.password = parser.acceptsAll(Arrays.asList("p", "password"), "Password used to authenticate with Elasticsearch") - .withRequiredArg(); + .withRequiredArg().required(); this.url = parser.acceptsAll(Arrays.asList("U", "url"), "URL of Elasticsearch host") .withRequiredArg(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index cbd0d7ca184..aa1461b189a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -70,7 +70,7 @@ class IndicesAndAliasesResolver { * then the index names will be categorized into those that refer to {@link 
ResolvedIndices#getLocal() local indices}, and those that * refer to {@link ResolvedIndices#getRemote() remote indices}. This categorization follows the standard * {@link RemoteClusterAware#buildRemoteIndexName(String, String) remote index-name format} and also respects the currently defined - * {@link RemoteClusterAware#getRemoteClusterNames() remote clusters}. + * remote clusters. *
* Thus an index name N will be considered to be remote if-and-only-if all of the following are true *
    @@ -438,11 +438,6 @@ class IndicesAndAliasesResolver { listenForUpdates(clusterSettings); } - @Override - protected Set getRemoteClusterNames() { - return clusters; - } - @Override protected void updateRemoteCluster(String clusterAlias, List addresses, String proxyAddress) { if (addresses.isEmpty()) { @@ -453,7 +448,7 @@ class IndicesAndAliasesResolver { } ResolvedIndices splitLocalAndRemoteIndexNames(String... indices) { - final Map> map = super.groupClusterIndices(indices, exists -> false); + final Map> map = super.groupClusterIndices(clusters, indices, exists -> false); final List local = map.remove(LOCAL_CLUSTER_GROUP_KEY); final List remote = map.entrySet().stream() .flatMap(e -> e.getValue().stream().map(v -> e.getKey() + REMOTE_CLUSTER_INDEX_SEPARATOR + v)) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java index 6d75cf09371..7f3e2cfce98 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.authc.esnative; +import joptsimple.OptionException; import joptsimple.OptionParser; import joptsimple.OptionSet; import org.elasticsearch.cli.MockTerminal; @@ -24,6 +25,7 @@ import java.util.Arrays; import java.util.HashSet; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; /** @@ -155,4 +157,13 @@ public class ESNativeMigrateToolTests extends NativeRealmIntegTestCase { assertThat("expected list to contain: " + r, roles.contains(r), is(true)); } } + + public void testMissingPasswordParameter() { + ESNativeRealmMigrateTool.MigrateUserOrRoles muor = new ESNativeRealmMigrateTool.MigrateUserOrRoles(); + + final OptionException ex = expectThrows(OptionException.class, + () -> muor.getParser().parse("-u", "elastic", "-U", "http://localhost:9200")); + + assertThat(ex.getMessage(), containsString("password")); + } } diff --git a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java index 13601d2fe20..8888ce33be5 100644 --- a/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java +++ b/x-pack/qa/evil-tests/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/SimpleKdcLdapServer.java @@ -13,7 +13,6 @@ import org.apache.kerby.kerberos.kerb.KrbException; import org.apache.kerby.kerberos.kerb.client.KrbConfig; import org.apache.kerby.kerberos.kerb.server.KdcConfigKey; import org.apache.kerby.kerberos.kerb.server.SimpleKdcServer; -import org.apache.kerby.util.NetworkUtil; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; @@ -22,6 +21,9 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.net.DatagramSocket; +import java.net.InetAddress; +import java.net.ServerSocket; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -31,6 +33,8 @@ import 
java.security.PrivilegedExceptionAction; import java.util.Locale; import java.util.concurrent.TimeUnit; +import javax.net.ServerSocketFactory; + /** * Utility wrapper around Apache {@link SimpleKdcServer} backed by Unboundid * {@link InMemoryDirectoryServer}.
    @@ -127,14 +131,14 @@ public class SimpleKdcLdapServer { simpleKdc.setWorkDir(workDir.toFile()); simpleKdc.setKdcHost(host); simpleKdc.setKdcRealm(realm); - if (kdcPort == 0) { - kdcPort = NetworkUtil.getServerPort(); - } if (transport != null) { - if (transport.trim().equals("TCP")) { + if (kdcPort == 0) { + kdcPort = getServerPort(transport); + } + if (transport.trim().equalsIgnoreCase("TCP")) { simpleKdc.setKdcTcpPort(kdcPort); simpleKdc.setAllowUdp(false); - } else if (transport.trim().equals("UDP")) { + } else if (transport.trim().equalsIgnoreCase("UDP")) { simpleKdc.setKdcUdpPort(kdcPort); simpleKdc.setAllowTcp(false); } else { @@ -221,4 +225,21 @@ public class SimpleKdcLdapServer { logger.info("SimpleKdcServer stoppped."); } + private static int getServerPort(String transport) { + if (transport != null && transport.trim().equalsIgnoreCase("TCP")) { + try (ServerSocket serverSocket = ServerSocketFactory.getDefault().createServerSocket(0, 1, + InetAddress.getByName("127.0.0.1"))) { + return serverSocket.getLocalPort(); + } catch (Exception ex) { + throw new RuntimeException("Failed to get a TCP server socket point"); + } + } else if (transport != null && transport.trim().equalsIgnoreCase("UDP")) { + try (DatagramSocket socket = new DatagramSocket(0, InetAddress.getByName("127.0.0.1"))) { + return socket.getLocalPort(); + } catch (Exception ex) { + throw new RuntimeException("Failed to get a UDP server socket point"); + } + } + throw new IllegalArgumentException("Invalid transport: " + transport); + } }
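A note on the SimpleKdcLdapServer change that closes out this diff: rather than relying on Kerby's NetworkUtil.getServerPort(), the test now picks an ephemeral port itself, per transport, via the new getServerPort(transport) helper. A minimal self-contained sketch of that technique (class name and printed output are illustrative only):

import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.ServerSocket;
import javax.net.ServerSocketFactory;

// Bind port 0 on loopback, read back the port the OS assigned, and release it again via
// try-with-resources. As with the helper above, another process could in principle claim
// the port before the KDC binds it; the test accepts that race.
public class EphemeralPortSketch {
    public static void main(String[] args) throws Exception {
        try (ServerSocket tcp = ServerSocketFactory.getDefault()
                 .createServerSocket(0, 1, InetAddress.getByName("127.0.0.1"))) {
            System.out.println("free TCP port: " + tcp.getLocalPort());
        }
        try (DatagramSocket udp = new DatagramSocket(0, InetAddress.getByName("127.0.0.1"))) {
            System.out.println("free UDP port: " + udp.getLocalPort());
        }
    }
}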