From 840a3bd5a6979d35255b85b38418ceda462ccc52 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 5 Jun 2018 08:50:06 -0700 Subject: [PATCH 01/22] [DOCS] Fixes security example (#31082) --- x-pack/docs/build.gradle | 1 - .../en/security/authorization/managing-roles.asciidoc | 8 +++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 314ffced4a0..17e0f2b70fd 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -81,7 +81,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/ml/validate-job.asciidoc', 'en/rest-api/security/authenticate.asciidoc', 'en/rest-api/watcher/stats.asciidoc', - 'en/security/authorization/managing-roles.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index 83edef1a67b..6ee5d9d39bb 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -12,6 +12,8 @@ A role is defined by the following JSON structure: "indices": [ ... ] <3> } ----- +// NOTCONSOLE + <1> A list of usernames the owners of this role can <>. <2> A list of cluster privileges. These privileges define the cluster level actions users with this role are able to execute. This field @@ -37,6 +39,8 @@ The following describes the structure of an indices permissions entry: "query": "..." <4> } ------- +// NOTCONSOLE + <1> A list of indices (or index name patterns) to which the permissions in this entry apply. 
<2> The index level privileges the owners of the role have on the associated @@ -77,8 +81,9 @@ The following snippet shows an example definition of a `clicks_admin` role: [source,js] ----------- +POST /_xpack/security/role/clicks_admin { - "run_as": [ "clicks_watcher_1" ] + "run_as": [ "clicks_watcher_1" ], "cluster": [ "monitor" ], "indices": [ { @@ -92,6 +97,7 @@ The following snippet shows an example definition of a `clicks_admin` role: ] } ----------- +// CONSOLE Based on the above definition, users owning the `clicks_admin` role can: From 05ee0f8b6e79f900d3d627adcc2f52182a1d472a Mon Sep 17 00:00:00 2001 From: Tim Brooks Date: Tue, 5 Jun 2018 10:09:20 -0600 Subject: [PATCH 02/22] Add cors support to NioHttpServerTransport (#30827) This is related to #28898. This commit adds cors support to the nio http transport. Most of the work is copied directly from the netty module implementation. Additionally, this commit adds tests for the nio http channel. --- .../http/netty4/cors/Netty4CorsConfig.java | 19 +- .../http/nio/HttpReadWriteHandler.java | 15 +- .../http/nio/NioHttpChannel.java | 8 +- .../http/nio/NioHttpServerTransport.java | 52 ++- .../http/nio/cors/NioCorsConfig.java | 236 ++++++++++++ .../http/nio/cors/NioCorsConfigBuilder.java | 357 ++++++++++++++++++ .../http/nio/cors/NioCorsHandler.java | 235 ++++++++++++ .../http/nio/HttpReadWriteHandlerTests.java | 5 +- .../http/nio/NioHttpChannelTests.java | 349 +++++++++++++++++ .../http/nio/NioHttpServerTransportTests.java | 70 ++-- .../http/HttpHandlingSettings.java | 30 ++ 11 files changed, 1331 insertions(+), 45 deletions(-) create mode 100644 plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java create mode 100644 plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java create mode 100644 plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java create mode 100644 
plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java index 9c81c07e663..939d5540ecf 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/cors/Netty4CorsConfig.java @@ -76,7 +76,8 @@ public final class Netty4CorsConfig { } /** - * Determines whether a wildcard origin, '*', is supported. + * Determines whether a wildcard origin, '*', is supported. This also means that null origins are + * supported. * * @return {@code boolean} true if any origin is allowed. */ @@ -121,21 +122,21 @@ public final class Netty4CorsConfig { } /** - * Determines if cookies are supported for CORS requests. + * Determines if credentials are supported for CORS requests. * - * By default cookies are not included in CORS requests but if isCredentialsAllowed returns - * true cookies will be added to CORS requests. Setting this value to true will set the + * By default credentials are not included in CORS requests but if isCredentialsAllowed returns + * true credentials will be added to CORS requests. Setting this value to true will set the * CORS 'Access-Control-Allow-Credentials' response header to true. * - * Please note that cookie support needs to be enabled on the client side as well. - * The client needs to opt-in to send cookies by calling: + * Please note that credentials support needs to be enabled on the client side as well. + * The client needs to opt-in to send credentials by calling: *
      * xhr.withCredentials = true;
      * 
- * The default value for 'withCredentials' is false in which case no cookies are sent. - * Setting this to true will included cookies in cross origin requests. + * The default value for 'withCredentials' is false in which case no credentials are sent. + * Setting this to true will included credentials in cross origin requests. * - * @return {@code true} if cookies are supported. + * @return {@code true} if credentials are supported. */ public boolean isCredentialsAllowed() { return allowCredentials; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java index 681736a311d..49e56036308 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -36,6 +36,8 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpPipelinedRequest; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsHandler; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSocketChannel; @@ -50,6 +52,8 @@ import java.util.Collections; import java.util.List; import java.util.function.BiConsumer; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; + public class HttpReadWriteHandler implements ReadWriteHandler { private final NettyAdaptor adaptor; @@ -57,14 +61,16 @@ public class HttpReadWriteHandler implements ReadWriteHandler { private final NioHttpServerTransport transport; private final HttpHandlingSettings settings; private final NamedXContentRegistry xContentRegistry; + private final NioCorsConfig corsConfig; 
private final ThreadContext threadContext; HttpReadWriteHandler(NioSocketChannel nioChannel, NioHttpServerTransport transport, HttpHandlingSettings settings, - NamedXContentRegistry xContentRegistry, ThreadContext threadContext) { + NamedXContentRegistry xContentRegistry, NioCorsConfig corsConfig, ThreadContext threadContext) { this.nioChannel = nioChannel; this.transport = transport; this.settings = settings; this.xContentRegistry = xContentRegistry; + this.corsConfig = corsConfig; this.threadContext = threadContext; List handlers = new ArrayList<>(5); @@ -78,6 +84,9 @@ public class HttpReadWriteHandler implements ReadWriteHandler { if (settings.isCompression()) { handlers.add(new HttpContentCompressor(settings.getCompressionLevel())); } + if (settings.isCorsEnabled()) { + handlers.add(new NioCorsHandler(corsConfig)); + } handlers.add(new NioHttpPipeliningHandler(transport.getLogger(), settings.getPipeliningMaxEvents())); adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); @@ -178,7 +187,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler { int sequence = pipelinedRequest.getSequence(); BigArrays bigArrays = transport.getBigArrays(); try { - innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, threadContext); + innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, corsConfig, threadContext); } catch (final IllegalArgumentException e) { if (badRequestCause == null) { badRequestCause = e; @@ -191,7 +200,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler { Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters copiedRequest.uri(), copiedRequest); - innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, threadContext); + innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, corsConfig, threadContext); } channel = innerChannel; } 
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java index 61cafed86a5..634421b34ea 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java @@ -41,6 +41,8 @@ import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsHandler; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestResponse; @@ -58,17 +60,19 @@ public class NioHttpChannel extends AbstractRestChannel { private final BigArrays bigArrays; private final int sequence; + private final NioCorsConfig corsConfig; private final ThreadContext threadContext; private final FullHttpRequest nettyRequest; private final NioSocketChannel nioChannel; private final boolean resetCookies; NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, int sequence, - HttpHandlingSettings settings, ThreadContext threadContext) { + HttpHandlingSettings settings, NioCorsConfig corsConfig, ThreadContext threadContext) { super(request, settings.getDetailedErrorsEnabled()); this.nioChannel = nioChannel; this.bigArrays = bigArrays; this.sequence = sequence; + this.corsConfig = corsConfig; this.threadContext = threadContext; this.nettyRequest = request.getRequest(); this.resetCookies = settings.isResetCookies(); @@ -87,6 +91,8 @@ public class NioHttpChannel extends AbstractRestChannel { } resp.setStatus(getStatus(response.status())); + NioCorsHandler.setCorsResponseHeaders(nettyRequest, resp, corsConfig); + String opaque = 
nettyRequest.headers().get("X-Opaque-Id"); if (opaque != null) { setHeaderField(resp, "X-Opaque-Id", opaque); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index de5c166de3f..ce0ed83aad4 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -19,6 +19,7 @@ package org.elasticsearch.http.nio; +import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.timeout.ReadTimeoutException; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -28,6 +29,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; @@ -38,11 +40,13 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.AbstractHttpServerTransport; import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; -import org.elasticsearch.http.AbstractHttpServerTransport; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsConfigBuilder; import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.AcceptorEventHandler; import 
org.elasticsearch.nio.BytesChannelContext; @@ -56,6 +60,7 @@ import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.nio.SocketEventHandler; import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -64,15 +69,23 @@ import java.net.InetSocketAddress; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.regex.Pattern; import static org.elasticsearch.common.settings.Setting.intSetting; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; @@ -86,6 +99,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECE import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; import static 
org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; +import static org.elasticsearch.http.nio.cors.NioCorsHandler.ANY_ORIGIN; public class NioHttpServerTransport extends AbstractHttpServerTransport { @@ -115,6 +129,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { private final Set socketChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); private NioGroup nioGroup; private HttpChannelFactory channelFactory; + private final NioCorsConfig corsConfig; public NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, HttpServerTransport.Dispatcher dispatcher) { @@ -136,6 +151,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { SETTING_HTTP_COMPRESSION_LEVEL.get(settings), SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), pipeliningMaxEvents); + this.corsConfig = buildCorsConfig(settings); this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings); @@ -279,6 +295,38 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), ex); } + static NioCorsConfig buildCorsConfig(Settings settings) { + if (SETTING_CORS_ENABLED.get(settings) == false) { + return NioCorsConfigBuilder.forOrigins().disable().build(); + } + String origin = SETTING_CORS_ALLOW_ORIGIN.get(settings); + final NioCorsConfigBuilder builder; + if (Strings.isNullOrEmpty(origin)) { + builder = NioCorsConfigBuilder.forOrigins(); + } else if (origin.equals(ANY_ORIGIN)) { + builder = NioCorsConfigBuilder.forAnyOrigin(); + } else { + Pattern p = RestUtils.checkCorsSettingForRegex(origin); + if (p == null) { + builder = 
NioCorsConfigBuilder.forOrigins(RestUtils.corsSettingAsArray(origin)); + } else { + builder = NioCorsConfigBuilder.forPattern(p); + } + } + if (SETTING_CORS_ALLOW_CREDENTIALS.get(settings)) { + builder.allowCredentials(); + } + String[] strMethods = Strings.tokenizeToStringArray(SETTING_CORS_ALLOW_METHODS.get(settings), ","); + HttpMethod[] methods = Arrays.stream(strMethods) + .map(HttpMethod::valueOf) + .toArray(HttpMethod[]::new); + return builder.allowedRequestMethods(methods) + .maxAge(SETTING_CORS_MAX_AGE.get(settings)) + .allowedRequestHeaders(Strings.tokenizeToStringArray(SETTING_CORS_ALLOW_HEADERS.get(settings), ",")) + .shortCircuit() + .build(); + } + private void closeChannels(List channels) { List> futures = new ArrayList<>(channels.size()); @@ -315,7 +363,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { public NioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { NioSocketChannel nioChannel = new NioSocketChannel(channel); HttpReadWriteHandler httpReadWritePipeline = new HttpReadWriteHandler(nioChannel,NioHttpServerTransport.this, - httpHandlingSettings, xContentRegistry, threadPool.getThreadContext()); + httpHandlingSettings, xContentRegistry, corsConfig, threadPool.getThreadContext()); Consumer exceptionHandler = (e) -> exceptionCaught(nioChannel, e); SocketChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, httpReadWritePipeline, InboundChannelBuffer.allocatingInstance()); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java new file mode 100644 index 00000000000..9848c26022e --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfig.java @@ -0,0 +1,236 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio.cors; + +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.EmptyHttpHeaders; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; + +import java.util.Collections; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.regex.Pattern; + +/** + * Configuration for Cross-Origin Resource Sharing (CORS). 
+ * + * This class was lifted from the Netty project: + * https://github.com/netty/netty + */ +public final class NioCorsConfig { + + private final Optional> origins; + private final Optional pattern; + private final boolean anyOrigin; + private final boolean enabled; + private final boolean allowCredentials; + private final long maxAge; + private final Set allowedRequestMethods; + private final Set allowedRequestHeaders; + private final boolean allowNullOrigin; + private final Map> preflightHeaders; + private final boolean shortCircuit; + + NioCorsConfig(final NioCorsConfigBuilder builder) { + origins = builder.origins.map(s -> new LinkedHashSet<>(s)); + pattern = builder.pattern; + anyOrigin = builder.anyOrigin; + enabled = builder.enabled; + allowCredentials = builder.allowCredentials; + maxAge = builder.maxAge; + allowedRequestMethods = builder.requestMethods; + allowedRequestHeaders = builder.requestHeaders; + allowNullOrigin = builder.allowNullOrigin; + preflightHeaders = builder.preflightHeaders; + shortCircuit = builder.shortCircuit; + } + + /** + * Determines if support for CORS is enabled. + * + * @return {@code true} if support for CORS is enabled, false otherwise. + */ + public boolean isCorsSupportEnabled() { + return enabled; + } + + /** + * Determines whether a wildcard origin, '*', is supported. This also means that null origins are + * supported. + * + * @return {@code boolean} true if any origin is allowed. + */ + public boolean isAnyOriginSupported() { + return anyOrigin; + } + + /** + * Returns the set of allowed origins. + * + * @return {@code Set} the allowed origins. + */ + public Optional> origins() { + return origins; + } + + /** + * Returns whether the input origin is allowed by this configuration. 
+ * + * @return {@code true} if the origin is allowed, otherwise {@code false} + */ + public boolean isOriginAllowed(final String origin) { + if (origins.isPresent()) { + return origins.get().contains(origin); + } else if (pattern.isPresent()) { + return pattern.get().matcher(origin).matches(); + } + return false; + } + + /** + * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded + * from the local file system. + * + * If isNullOriginAllowed is true then the server will response with the wildcard for the + * the CORS response header 'Access-Control-Allow-Origin'. + * + * @return {@code true} if a 'null' origin should be supported. + */ + public boolean isNullOriginAllowed() { + return allowNullOrigin; + } + + /** + * Determines if credentials are supported for CORS requests. + * + * By default credentials are not included in CORS requests but if isCredentialsAllowed returns + * true credentials will be added to CORS requests. Setting this value to true will set the + * CORS 'Access-Control-Allow-Credentials' response header to true. + * + * Please note that credentials support needs to be enabled on the client side as well. + * The client needs to opt-in to send credentials by calling: + *
+     * xhr.withCredentials = true;
+     * 
+ * The default value for 'withCredentials' is false in which case no credentials are sent. + * Setting this to true will included cookies in cross origin requests. + * + * @return {@code true} if credentials are supported. + */ + public boolean isCredentialsAllowed() { + return allowCredentials; + } + + /** + * Gets the maxAge setting. + * + * When making a preflight request the client has to perform two request with can be inefficient. + * This setting will set the CORS 'Access-Control-Max-Age' response header and enables the + * caching of the preflight response for the specified time. During this time no preflight + * request will be made. + * + * @return {@code long} the time in seconds that a preflight request may be cached. + */ + public long maxAge() { + return maxAge; + } + + /** + * Returns the allowed set of Request Methods. The Http methods that should be returned in the + * CORS 'Access-Control-Request-Method' response header. + * + * @return {@code Set} of {@link HttpMethod}s that represent the allowed Request Methods. + */ + public Set allowedRequestMethods() { + return Collections.unmodifiableSet(allowedRequestMethods); + } + + /** + * Returns the allowed set of Request Headers. + * + * The header names returned from this method will be used to set the CORS + * 'Access-Control-Allow-Headers' response header. + * + * @return {@code Set} of strings that represent the allowed Request Headers. + */ + public Set allowedRequestHeaders() { + return Collections.unmodifiableSet(allowedRequestHeaders); + } + + /** + * Returns HTTP response headers that should be added to a CORS preflight response. + * + * @return {@link HttpHeaders} the HTTP response headers to be added. 
+ */ + public HttpHeaders preflightResponseHeaders() { + if (preflightHeaders.isEmpty()) { + return EmptyHttpHeaders.INSTANCE; + } + final HttpHeaders preflightHeaders = new DefaultHttpHeaders(); + for (Map.Entry> entry : this.preflightHeaders.entrySet()) { + final Object value = getValue(entry.getValue()); + if (value instanceof Iterable) { + preflightHeaders.add(entry.getKey().toString(), (Iterable) value); + } else { + preflightHeaders.add(entry.getKey().toString(), value); + } + } + return preflightHeaders; + } + + /** + * Determines whether a CORS request should be rejected if it's invalid before being + * further processing. + * + * CORS headers are set after a request is processed. This may not always be desired + * and this setting will check that the Origin is valid and if it is not valid no + * further processing will take place, and a error will be returned to the calling client. + * + * @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header. 
+ */ + public boolean isShortCircuit() { + return shortCircuit; + } + + private static T getValue(final Callable callable) { + try { + return callable.call(); + } catch (final Exception e) { + throw new IllegalStateException("Could not generate value for callable [" + callable + ']', e); + } + } + + @Override + public String toString() { + return "CorsConfig[enabled=" + enabled + + ", origins=" + origins + + ", anyOrigin=" + anyOrigin + + ", isCredentialsAllowed=" + allowCredentials + + ", maxAge=" + maxAge + + ", allowedRequestMethods=" + allowedRequestMethods + + ", allowedRequestHeaders=" + allowedRequestHeaders + + ", preflightHeaders=" + preflightHeaders + ']'; + } + +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java new file mode 100644 index 00000000000..333e4931aa1 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsConfigBuilder.java @@ -0,0 +1,357 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio.cors; + +import io.netty.handler.codec.http.HttpMethod; + +import java.util.Arrays; +import java.util.Date; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.regex.Pattern; + +/** + * Builder used to configure and build a {@link NioCorsConfig} instance. + * + * This class was lifted from the Netty project: + * https://github.com/netty/netty + */ +public final class NioCorsConfigBuilder { + + /** + * Creates a Builder instance with it's origin set to '*'. + * + * @return Builder to support method chaining. + */ + public static NioCorsConfigBuilder forAnyOrigin() { + return new NioCorsConfigBuilder(); + } + + /** + * Creates a {@link NioCorsConfigBuilder} instance with the specified origin. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public static NioCorsConfigBuilder forOrigin(final String origin) { + if ("*".equals(origin)) { + return new NioCorsConfigBuilder(); + } + return new NioCorsConfigBuilder(origin); + } + + + /** + * Create a {@link NioCorsConfigBuilder} instance with the specified pattern origin. + * + * @param pattern the regular expression pattern to match incoming origins on. + * @return {@link NioCorsConfigBuilder} with the configured origin pattern. + */ + public static NioCorsConfigBuilder forPattern(final Pattern pattern) { + if (pattern == null) { + throw new IllegalArgumentException("CORS pattern cannot be null"); + } + return new NioCorsConfigBuilder(pattern); + } + + /** + * Creates a {@link NioCorsConfigBuilder} instance with the specified origins. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public static NioCorsConfigBuilder forOrigins(final String... 
origins) { + return new NioCorsConfigBuilder(origins); + } + + Optional> origins; + Optional pattern; + final boolean anyOrigin; + boolean allowNullOrigin; + boolean enabled = true; + boolean allowCredentials; + long maxAge; + final Set requestMethods = new HashSet<>(); + final Set requestHeaders = new HashSet<>(); + final Map> preflightHeaders = new HashMap<>(); + private boolean noPreflightHeaders; + boolean shortCircuit; + + /** + * Creates a new Builder instance with the origin passed in. + * + * @param origins the origin to be used for this builder. + */ + NioCorsConfigBuilder(final String... origins) { + this.origins = Optional.of(new LinkedHashSet<>(Arrays.asList(origins))); + pattern = Optional.empty(); + anyOrigin = false; + } + + /** + * Creates a new Builder instance allowing any origin, "*" which is the + * wildcard origin. + * + */ + NioCorsConfigBuilder() { + anyOrigin = true; + origins = Optional.empty(); + pattern = Optional.empty(); + } + + /** + * Creates a new Builder instance allowing any origin that matches the pattern. + * + * @param pattern the pattern to match against for incoming origins. + */ + NioCorsConfigBuilder(final Pattern pattern) { + this.pattern = Optional.of(pattern); + origins = Optional.empty(); + anyOrigin = false; + } + + /** + * Web browsers may set the 'Origin' request header to 'null' if a resource is loaded + * from the local file system. Calling this method will enable a successful CORS response + * with a wildcard for the CORS response header 'Access-Control-Allow-Origin'. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + NioCorsConfigBuilder allowNullOrigin() { + allowNullOrigin = true; + return this; + } + + /** + * Disables CORS support. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. 
+ */ + public NioCorsConfigBuilder disable() { + enabled = false; + return this; + } + + /** + * By default cookies are not included in CORS requests, but this method will enable cookies to + * be added to CORS requests. Calling this method will set the CORS 'Access-Control-Allow-Credentials' + * response header to true. + * + * Please note, that cookie support needs to be enabled on the client side as well. + * The client needs to opt-in to send cookies by calling: + *
+     * xhr.withCredentials = true;
+     * 
+ * The default value for 'withCredentials' is false in which case no cookies are sent. + * Setting this to true will included cookies in cross origin requests. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder allowCredentials() { + allowCredentials = true; + return this; + } + + /** + * When making a preflight request the client has to perform two request with can be inefficient. + * This setting will set the CORS 'Access-Control-Max-Age' response header and enables the + * caching of the preflight response for the specified time. During this time no preflight + * request will be made. + * + * @param max the maximum time, in seconds, that the preflight response may be cached. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder maxAge(final long max) { + maxAge = max; + return this; + } + + /** + * Specifies the allowed set of HTTP Request Methods that should be returned in the + * CORS 'Access-Control-Request-Method' response header. + * + * @param methods the {@link HttpMethod}s that should be allowed. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder allowedRequestMethods(final HttpMethod... methods) { + requestMethods.addAll(Arrays.asList(methods)); + return this; + } + + /** + * Specifies the if headers that should be returned in the CORS 'Access-Control-Allow-Headers' + * response header. + * + * If a client specifies headers on the request, for example by calling: + *
+     * xhr.setRequestHeader('My-Custom-Header', "SomeValue");
+     * 
+ * the server will receive the above header name in the 'Access-Control-Request-Headers' of the + * preflight request. The server will then decide if it allows this header to be sent for the + * real request (remember that a preflight is not the real request but a request asking the server + * if it allow a request). + * + * @param headers the headers to be added to the preflight 'Access-Control-Allow-Headers' response header. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder allowedRequestHeaders(final String... headers) { + requestHeaders.addAll(Arrays.asList(headers)); + return this; + } + + /** + * Returns HTTP response headers that should be added to a CORS preflight response. + * + * An intermediary like a load balancer might require that a CORS preflight request + * have certain headers set. This enables such headers to be added. + * + * @param name the name of the HTTP header. + * @param values the values for the HTTP header. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Object... values) { + if (values.length == 1) { + preflightHeaders.put(name, new ConstantValueGenerator(values[0])); + } else { + preflightResponseHeader(name, Arrays.asList(values)); + } + return this; + } + + /** + * Returns HTTP response headers that should be added to a CORS preflight response. + * + * An intermediary like a load balancer might require that a CORS preflight request + * have certain headers set. This enables such headers to be added. + * + * @param name the name of the HTTP header. + * @param value the values for the HTTP header. + * @param the type of values that the Iterable contains. + * @return {@link NioCorsConfigBuilder} to support method chaining. 
+ */ + public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Iterable value) { + preflightHeaders.put(name, new ConstantValueGenerator(value)); + return this; + } + + /** + * Returns HTTP response headers that should be added to a CORS preflight response. + * + * An intermediary like a load balancer might require that a CORS preflight request + * have certain headers set. This enables such headers to be added. + * + * Some values must be dynamically created when the HTTP response is created, for + * example the 'Date' response header. This can be accomplished by using a Callable + * which will have its 'call' method invoked when the HTTP response is created. + * + * @param name the name of the HTTP header. + * @param valueGenerator a Callable which will be invoked at HTTP response creation. + * @param the type of the value that the Callable can return. + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder preflightResponseHeader(final CharSequence name, final Callable valueGenerator) { + preflightHeaders.put(name, valueGenerator); + return this; + } + + /** + * Specifies that no preflight response headers should be added to a preflight response. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. + */ + public NioCorsConfigBuilder noPreflightResponseHeaders() { + noPreflightHeaders = true; + return this; + } + + /** + * Specifies that a CORS request should be rejected if it's invalid before being + * further processing. + * + * CORS headers are set after a request is processed. This may not always be desired + * and this setting will check that the Origin is valid and if it is not valid no + * further processing will take place, and a error will be returned to the calling client. + * + * @return {@link NioCorsConfigBuilder} to support method chaining. 
+ */ + public NioCorsConfigBuilder shortCircuit() { + shortCircuit = true; + return this; + } + + /** + * Builds a {@link NioCorsConfig} with settings specified by previous method calls. + * + * @return {@link NioCorsConfig} the configured CorsConfig instance. + */ + public NioCorsConfig build() { + if (preflightHeaders.isEmpty() && !noPreflightHeaders) { + preflightHeaders.put("date", DateValueGenerator.INSTANCE); + preflightHeaders.put("content-length", new ConstantValueGenerator("0")); + } + return new NioCorsConfig(this); + } + + /** + * This class is used for preflight HTTP response values that do not need to be + * generated, but instead the value is "static" in that the same value will be returned + * for each call. + */ + private static final class ConstantValueGenerator implements Callable { + + private final Object value; + + /** + * Sole constructor. + * + * @param value the value that will be returned when the call method is invoked. + */ + private ConstantValueGenerator(final Object value) { + if (value == null) { + throw new IllegalArgumentException("value must not be null"); + } + this.value = value; + } + + @Override + public Object call() { + return value; + } + } + + /** + * This callable is used for the DATE preflight HTTP response HTTP header. + * It's value must be generated when the response is generated, hence will be + * different for every call. 
+ */ + private static final class DateValueGenerator implements Callable { + + static final DateValueGenerator INSTANCE = new DateValueGenerator(); + + @Override + public Date call() throws Exception { + return new Date(); + } + } + +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java new file mode 100644 index 00000000000..63585107037 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/cors/NioCorsHandler.java @@ -0,0 +1,235 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio.cors; + +import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import org.elasticsearch.common.Strings; + +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +/** + * Handles Cross Origin Resource Sharing (CORS) requests. + *

+ * This handler can be configured using a {@link NioCorsConfig}, please + * refer to this class for details about the configuration options available. + * + * This code was borrowed from Netty 4 and refactored to work for Elasticsearch's Netty 3 setup. + */ +public class NioCorsHandler extends ChannelDuplexHandler { + + public static final String ANY_ORIGIN = "*"; + private static Pattern SCHEME_PATTERN = Pattern.compile("^https?://"); + + private final NioCorsConfig config; + private HttpRequest request; + + /** + * Creates a new instance with the specified {@link NioCorsConfig}. + */ + public NioCorsHandler(final NioCorsConfig config) { + if (config == null) { + throw new NullPointerException(); + } + this.config = config; + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + if (config.isCorsSupportEnabled() && msg instanceof HttpRequest) { + request = (HttpRequest) msg; + if (isPreflightRequest(request)) { + handlePreflight(ctx, request); + return; + } + if (config.isShortCircuit() && !validateOrigin()) { + forbidden(ctx, request); + return; + } + } + ctx.fireChannelRead(msg); + } + + public static void setCorsResponseHeaders(HttpRequest request, HttpResponse resp, NioCorsConfig config) { + if (!config.isCorsSupportEnabled()) { + return; + } + String originHeader = request.headers().get(HttpHeaderNames.ORIGIN); + if (!Strings.isNullOrEmpty(originHeader)) { + final String originHeaderVal; + if (config.isAnyOriginSupported()) { + originHeaderVal = ANY_ORIGIN; + } else if (config.isOriginAllowed(originHeader) || isSameOrigin(originHeader, request.headers().get(HttpHeaderNames.HOST))) { + originHeaderVal = originHeader; + } else { + originHeaderVal = null; + } + if (originHeaderVal != null) { + resp.headers().add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, originHeaderVal); + } + } + if (config.isCredentialsAllowed()) { + resp.headers().add(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"); + } + } + + 
private void handlePreflight(final ChannelHandlerContext ctx, final HttpRequest request) { + final HttpResponse response = new DefaultFullHttpResponse(request.protocolVersion(), HttpResponseStatus.OK, true, true); + if (setOrigin(response)) { + setAllowMethods(response); + setAllowHeaders(response); + setAllowCredentials(response); + setMaxAge(response); + setPreflightHeaders(response); + ctx.writeAndFlush(response).addListener(ChannelFutureListener.CLOSE); + } else { + forbidden(ctx, request); + } + } + + private static void forbidden(final ChannelHandlerContext ctx, final HttpRequest request) { + ctx.writeAndFlush(new DefaultFullHttpResponse(request.protocolVersion(), HttpResponseStatus.FORBIDDEN)) + .addListener(ChannelFutureListener.CLOSE); + } + + private static boolean isSameOrigin(final String origin, final String host) { + if (Strings.isNullOrEmpty(host) == false) { + // strip protocol from origin + final String originDomain = SCHEME_PATTERN.matcher(origin).replaceFirst(""); + if (host.equals(originDomain)) { + return true; + } + } + return false; + } + + /** + * This is a non CORS specification feature which enables the setting of preflight + * response headers that might be required by intermediaries. + * + * @param response the HttpResponse to which the preflight response headers should be added. 
+ */ + private void setPreflightHeaders(final HttpResponse response) { + response.headers().add(config.preflightResponseHeaders()); + } + + private boolean setOrigin(final HttpResponse response) { + final String origin = request.headers().get(HttpHeaderNames.ORIGIN); + if (!Strings.isNullOrEmpty(origin)) { + if ("null".equals(origin) && config.isNullOriginAllowed()) { + setAnyOrigin(response); + return true; + } + + if (config.isAnyOriginSupported()) { + if (config.isCredentialsAllowed()) { + echoRequestOrigin(response); + setVaryHeader(response); + } else { + setAnyOrigin(response); + } + return true; + } + if (config.isOriginAllowed(origin)) { + setOrigin(response, origin); + setVaryHeader(response); + return true; + } + } + return false; + } + + private boolean validateOrigin() { + if (config.isAnyOriginSupported()) { + return true; + } + + final String origin = request.headers().get(HttpHeaderNames.ORIGIN); + if (Strings.isNullOrEmpty(origin)) { + // Not a CORS request so we cannot validate it. It may be a non CORS request. 
+ return true; + } + + if ("null".equals(origin) && config.isNullOriginAllowed()) { + return true; + } + + // if the origin is the same as the host of the request, then allow + if (isSameOrigin(origin, request.headers().get(HttpHeaderNames.HOST))) { + return true; + } + + return config.isOriginAllowed(origin); + } + + private void echoRequestOrigin(final HttpResponse response) { + setOrigin(response, request.headers().get(HttpHeaderNames.ORIGIN)); + } + + private static void setVaryHeader(final HttpResponse response) { + response.headers().set(HttpHeaderNames.VARY, HttpHeaderNames.ORIGIN); + } + + private static void setAnyOrigin(final HttpResponse response) { + setOrigin(response, ANY_ORIGIN); + } + + private static void setOrigin(final HttpResponse response, final String origin) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN, origin); + } + + private void setAllowCredentials(final HttpResponse response) { + if (config.isCredentialsAllowed() + && !response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN).equals(ANY_ORIGIN)) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS, "true"); + } + } + + private static boolean isPreflightRequest(final HttpRequest request) { + final HttpHeaders headers = request.headers(); + return request.method().equals(HttpMethod.OPTIONS) && + headers.contains(HttpHeaderNames.ORIGIN) && + headers.contains(HttpHeaderNames.ACCESS_CONTROL_REQUEST_METHOD); + } + + private void setAllowMethods(final HttpResponse response) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS, config.allowedRequestMethods().stream() + .map(m -> m.name().trim()) + .collect(Collectors.toList())); + } + + private void setAllowHeaders(final HttpResponse response) { + response.headers().set(HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS, config.allowedRequestHeaders()); + } + + private void setMaxAge(final HttpResponse response) { + 
response.headers().set(HttpHeaderNames.ACCESS_CONTROL_MAX_AGE, config.maxAge()); + } + +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java index cc8eeb77cc2..56cbab5295a 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -39,6 +39,8 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsConfigBuilder; import org.elasticsearch.nio.FlushOperation; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSocketChannel; @@ -95,7 +97,8 @@ public class HttpReadWriteHandlerTests extends ESTestCase { SETTING_PIPELINING_MAX_EVENTS.getDefault(settings)); ThreadContext threadContext = new ThreadContext(settings); nioSocketChannel = mock(NioSocketChannel.class); - handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, threadContext); + handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, + NioCorsConfigBuilder.forAnyOrigin().build(), threadContext); } public void testSuccessfulDecodeHttpRequest() throws IOException { diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java new file mode 100644 index 00000000000..5fa0a7ae0a6 --- /dev/null +++ 
b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpChannelTests.java @@ -0,0 +1,349 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; 
+import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.http.nio.cors.NioCorsConfig; +import org.elasticsearch.http.nio.cors.NioCorsHandler; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.nio.channels.ClosedChannelException; +import java.nio.charset.StandardCharsets; +import java.util.function.BiConsumer; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class NioHttpChannelTests extends ESTestCase { + + private ThreadPool threadPool; + private MockBigArrays bigArrays; + private NioSocketChannel nioChannel; + private 
SocketChannelContext channelContext; + + @Before + public void setup() throws Exception { + nioChannel = mock(NioSocketChannel.class); + channelContext = mock(SocketChannelContext.class); + when(nioChannel.getContext()).thenReturn(channelContext); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + } + + public void testResponse() { + final FullHttpResponse response = executeRequest(Settings.EMPTY, "request-host"); + assertThat(response.content(), equalTo(ByteBufUtils.toByteBuf(new TestResponse().content()))); + } + + public void testCorsEnabledWithoutAllowOrigins() { + // Set up a HTTP transport with only the CORS enabled setting + Settings settings = Settings.builder() + .put(HttpTransportSettings.SETTING_CORS_ENABLED.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, "remote-host", "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), nullValue()); + } + + public void testCorsEnabledWithAllowOrigins() { + final String originValue = "remote-host"; + // create a http transport with CORS enabled and allow origin configured + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .build(); + HttpResponse response = executeRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + } + + public void testCorsAllowOriginWithSameHost() { + String originValue = "remote-host"; + String host = 
"remote-host"; + // create a http transport with CORS enabled + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, originValue, host); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = "http://" + originValue; + response = executeRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = originValue + ":5555"; + host = host + ":5555"; + response = executeRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + + originValue = originValue.replace("http", "https"); + response = executeRequest(settings, originValue, host); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + } + + public void testThatStringLiteralWorksOnMatch() { + final String originValue = "remote-host"; + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .put(SETTING_CORS_ALLOW_METHODS.getKey(), "get, options, post") + .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) + .build(); + HttpResponse response = executeRequest(settings, originValue, 
"request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); + } + + public void testThatAnyOriginWorks() { + final String originValue = NioCorsHandler.ANY_ORIGIN; + Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), originValue) + .build(); + HttpResponse response = executeRequest(settings, originValue, "request-host"); + // inspect response and validate + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN), notNullValue()); + String allowedOrigins = response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN); + assertThat(allowedOrigins, is(originValue)); + assertThat(response.headers().get(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS), nullValue()); + } + + public void testHeadersSet() { + Settings settings = Settings.builder().build(); + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote"); + final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + + // send a response + NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, corsConfig, + threadPool.getThreadContext()); + TestResponse resp = new TestResponse(); + final String customHeader = "custom-header"; + final String customHeaderValue = "xyz"; + resp.addHeader(customHeader, customHeaderValue); + 
channel.sendResponse(resp); + + // inspect what was written + ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(Object.class); + verify(channelContext).sendMessage(responseCaptor.capture(), any()); + Object nioResponse = responseCaptor.getValue(); + HttpResponse response = ((NioHttpResponse) nioResponse).getResponse(); + assertThat(response.headers().get("non-existent-header"), nullValue()); + assertThat(response.headers().get(customHeader), equalTo(customHeaderValue)); + assertThat(response.headers().get(HttpHeaderNames.CONTENT_LENGTH), equalTo(Integer.toString(resp.content().length()))); + assertThat(response.headers().get(HttpHeaderNames.CONTENT_TYPE), equalTo(resp.contentType())); + } + + @SuppressWarnings("unchecked") + public void testReleaseInListener() throws IOException { + final Settings settings = Settings.builder().build(); + final NamedXContentRegistry registry = xContentRegistry(); + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + final NioHttpRequest request = new NioHttpRequest(registry, httpRequest); + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + + NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, + corsConfig, threadPool.getThreadContext()); + final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, + JsonXContent.contentBuilder().startObject().endObject()); + assertThat(response.content(), not(instanceOf(Releasable.class))); + + // ensure we have reserved bytes + if (randomBoolean()) { + BytesStreamOutput out = channel.bytesOutput(); + assertThat(out, instanceOf(ReleasableBytesStreamOutput.class)); + } else { + try (XContentBuilder builder = channel.newBuilder()) { + // do something builder + builder.startObject().endObject(); + } + } + + channel.sendResponse(response); + Class> 
listenerClass = (Class>) (Class) BiConsumer.class; + ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(listenerClass); + verify(channelContext).sendMessage(any(), listenerCaptor.capture()); + BiConsumer listener = listenerCaptor.getValue(); + if (randomBoolean()) { + listener.accept(null, null); + } else { + listener.accept(null, new ClosedChannelException()); + } + // ESTestCase#after will invoke ensureAllArraysAreReleased which will fail if the response content was not released + } + + + @SuppressWarnings("unchecked") + public void testConnectionClose() throws Exception { + final Settings settings = Settings.builder().build(); + final FullHttpRequest httpRequest; + final boolean close = randomBoolean(); + if (randomBoolean()) { + httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + if (close) { + httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); + } + } else { + httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/"); + if (!close) { + httpRequest.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE); + } + } + final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); + + HttpHandlingSettings handlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + + NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, handlingSettings, + corsConfig, threadPool.getThreadContext()); + final TestResponse resp = new TestResponse(); + channel.sendResponse(resp); + Class> listenerClass = (Class>) (Class) BiConsumer.class; + ArgumentCaptor> listenerCaptor = ArgumentCaptor.forClass(listenerClass); + verify(channelContext).sendMessage(any(), listenerCaptor.capture()); + BiConsumer listener = listenerCaptor.getValue(); + listener.accept(null, null); + if (close) { + verify(nioChannel, times(1)).close(); + } else { + verify(nioChannel, 
times(0)).close(); + } + } + + private FullHttpResponse executeRequest(final Settings settings, final String host) { + return executeRequest(settings, null, host); + } + + private FullHttpResponse executeRequest(final Settings settings, final String originValue, final String host) { + // construct request and send it over the transport layer + final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + if (originValue != null) { + httpRequest.headers().add(HttpHeaderNames.ORIGIN, originValue); + } + httpRequest.headers().add(HttpHeaderNames.HOST, host); + final NioHttpRequest request = new NioHttpRequest(xContentRegistry(), httpRequest); + + HttpHandlingSettings httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); + NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + NioHttpChannel channel = new NioHttpChannel(nioChannel, bigArrays, request, 1, httpHandlingSettings, corsConfig, + threadPool.getThreadContext()); + channel.sendResponse(new TestResponse()); + + // get the response + ArgumentCaptor responseCaptor = ArgumentCaptor.forClass(Object.class); + verify(channelContext, atLeastOnce()).sendMessage(responseCaptor.capture(), any()); + return ((NioHttpResponse) responseCaptor.getValue()).getResponse(); + } + + private static class TestResponse extends RestResponse { + + private final BytesReference reference; + + TestResponse() { + reference = ByteBufUtils.toBytesReference(Unpooled.copiedBuffer("content", StandardCharsets.UTF_8)); + } + + @Override + public String contentType() { + return "text"; + } + + @Override + public BytesReference content() { + return reference; + } + + @Override + public RestStatus status() { + return RestStatus.OK; + } + + } +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java index 
4741bd69a52..c43fc7d0723 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -32,6 +32,7 @@ import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; @@ -45,6 +46,7 @@ import org.elasticsearch.http.BindHttpException; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; +import org.elasticsearch.http.nio.cors.NioCorsConfig; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestChannel; @@ -58,9 +60,19 @@ import org.junit.Before; import java.io.IOException; import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE; import static 
org.elasticsearch.rest.RestStatus.BAD_REQUEST; import static org.elasticsearch.rest.RestStatus.OK; import static org.hamcrest.Matchers.containsString; @@ -94,36 +106,36 @@ public class NioHttpServerTransportTests extends ESTestCase { bigArrays = null; } -// public void testCorsConfig() { -// final Set methods = new HashSet<>(Arrays.asList("get", "options", "post")); -// final Set headers = new HashSet<>(Arrays.asList("Content-Type", "Content-Length")); -// final String prefix = randomBoolean() ? " " : ""; // sometimes have a leading whitespace between comma delimited elements -// final Settings settings = Settings.builder() -// .put(SETTING_CORS_ENABLED.getKey(), true) -// .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "*") -// .put(SETTING_CORS_ALLOW_METHODS.getKey(), collectionToDelimitedString(methods, ",", prefix, "")) -// .put(SETTING_CORS_ALLOW_HEADERS.getKey(), collectionToDelimitedString(headers, ",", prefix, "")) -// .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) -// .build(); -// final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings); -// assertTrue(corsConfig.isAnyOriginSupported()); -// assertEquals(headers, corsConfig.allowedRequestHeaders()); -// assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); -// } + public void testCorsConfig() { + final Set methods = new HashSet<>(Arrays.asList("get", "options", "post")); + final Set headers = new HashSet<>(Arrays.asList("Content-Type", "Content-Length")); + final String prefix = randomBoolean() ? 
" " : ""; // sometimes have a leading whitespace between comma delimited elements + final Settings settings = Settings.builder() + .put(SETTING_CORS_ENABLED.getKey(), true) + .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "*") + .put(SETTING_CORS_ALLOW_METHODS.getKey(), Strings.collectionToDelimitedString(methods, ",", prefix, "")) + .put(SETTING_CORS_ALLOW_HEADERS.getKey(), Strings.collectionToDelimitedString(headers, ",", prefix, "")) + .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) + .build(); + final NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + assertTrue(corsConfig.isAnyOriginSupported()); + assertEquals(headers, corsConfig.allowedRequestHeaders()); + assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); + } -// public void testCorsConfigWithDefaults() { -// final Set methods = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_METHODS.getDefault(Settings.EMPTY)); -// final Set headers = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_HEADERS.getDefault(Settings.EMPTY)); -// final long maxAge = SETTING_CORS_MAX_AGE.getDefault(Settings.EMPTY); -// final Settings settings = Settings.builder().put(SETTING_CORS_ENABLED.getKey(), true).build(); -// final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings); -// assertFalse(corsConfig.isAnyOriginSupported()); -// assertEquals(Collections.emptySet(), corsConfig.origins().get()); -// assertEquals(headers, corsConfig.allowedRequestHeaders()); -// assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); -// assertEquals(maxAge, corsConfig.maxAge()); -// assertFalse(corsConfig.isCredentialsAllowed()); -// } + public void testCorsConfigWithDefaults() { + final Set methods = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_METHODS.getDefault(Settings.EMPTY)); + final Set headers = 
Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_HEADERS.getDefault(Settings.EMPTY)); + final long maxAge = SETTING_CORS_MAX_AGE.getDefault(Settings.EMPTY); + final Settings settings = Settings.builder().put(SETTING_CORS_ENABLED.getKey(), true).build(); + final NioCorsConfig corsConfig = NioHttpServerTransport.buildCorsConfig(settings); + assertFalse(corsConfig.isAnyOriginSupported()); + assertEquals(Collections.emptySet(), corsConfig.origins().get()); + assertEquals(headers, corsConfig.allowedRequestHeaders()); + assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); + assertEquals(maxAge, corsConfig.maxAge()); + assertFalse(corsConfig.isCredentialsAllowed()); + } /** * Test that {@link NioHttpServerTransport} supports the "Expect: 100-continue" HTTP header diff --git a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java index df038e8303e..7559b058ea7 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java @@ -19,6 +19,19 @@ package org.elasticsearch.http; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; +import static 
org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; + public class HttpHandlingSettings { private final int maxContentLength; @@ -30,6 +43,7 @@ public class HttpHandlingSettings { private final int compressionLevel; private final boolean detailedErrorsEnabled; private final int pipeliningMaxEvents; + private boolean corsEnabled; public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeaderSize, int maxInitialLineLength, boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled, @@ -45,6 +59,18 @@ public class HttpHandlingSettings { this.pipeliningMaxEvents = pipeliningMaxEvents; } + public static HttpHandlingSettings fromSettings(Settings settings) { + return new HttpHandlingSettings(Math.toIntExact(SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).getBytes()), + Math.toIntExact(SETTING_HTTP_MAX_CHUNK_SIZE.get(settings).getBytes()), + Math.toIntExact(SETTING_HTTP_MAX_HEADER_SIZE.get(settings).getBytes()), + Math.toIntExact(SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings).getBytes()), + SETTING_HTTP_RESET_COOKIES.get(settings), + SETTING_HTTP_COMPRESSION.get(settings), + SETTING_HTTP_COMPRESSION_LEVEL.get(settings), + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), + SETTING_PIPELINING_MAX_EVENTS.get(settings)); + } + public int getMaxContentLength() { return maxContentLength; } @@ -80,4 +106,8 @@ public class HttpHandlingSettings { public int getPipeliningMaxEvents() { return pipeliningMaxEvents; } + + public boolean isCorsEnabled() { + return corsEnabled; + } } From a1c9def64ecd09afc80c181b65150375ad8fa23b Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 5 Jun 2018 15:00:34 -0400 Subject: [PATCH 03/22] [Rollup] Disallow index patterns that match the rollup index (#30491) We should not allow the user to configure index patterns that also match the index which stores the rollup index. 
For example, it is quite natural for a user to specify `metricbeat-*` as the index pattern, and then store the rollups in `metricbeat-rolled`. This will start throwing errors as soon as the rollup index is created because the indexer will try to search it. Note: this does not prevent the user from matching against existing rollup indices. That should be prevented by the field-level validation during job creation. --- .../core/rollup/job/RollupJobConfig.java | 14 ++++++++- .../xpack/core/rollup/ConfigTestHelpers.java | 5 +-- .../action/TransportPutRollupJobAction.java | 4 +-- .../xpack/rollup/config/ConfigTests.java | 31 +++++++++++++++++++ .../rest-api-spec/test/rollup/put_job.yml | 1 - 5 files changed, 49 insertions(+), 6 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java index a799cbe9447..3818ebcf447 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; @@ -173,7 +174,7 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject { builder.endObject(); return builder; } - + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); @@ -336,6 +337,17 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject { if (indexPattern == null || 
indexPattern.isEmpty()) { throw new IllegalArgumentException("An index pattern is mandatory."); } + if (Regex.isMatchAllPattern(indexPattern)) { + throw new IllegalArgumentException("Index pattern must not match all indices (as it would match it's own rollup index"); + } + if (Regex.isSimpleMatchPattern(indexPattern)) { + if (Regex.simpleMatch(indexPattern, rollupIndex)) { + throw new IllegalArgumentException("Index pattern would match rollup index name which is not allowed."); + } + } + if (indexPattern.equals(rollupIndex)) { + throw new IllegalArgumentException("Rollup index may not be the same as the index pattern."); + } if (rollupIndex == null || rollupIndex.isEmpty()) { throw new IllegalArgumentException("A rollup index name is mandatory."); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java index 7522f474e77..3d82ac118f5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java @@ -27,8 +27,9 @@ public class ConfigTestHelpers { builder.setId(jobId); builder.setCron(getCronString()); builder.setTimeout(new TimeValue(ESTestCase.randomIntBetween(1,100))); - builder.setIndexPattern(ESTestCase.randomAlphaOfLengthBetween(1,10)); - builder.setRollupIndex(ESTestCase.randomAlphaOfLengthBetween(1,10)); + String indexPattern = ESTestCase.randomAlphaOfLengthBetween(1,10); + builder.setIndexPattern(indexPattern); + builder.setRollupIndex("rollup_" + indexPattern); // to ensure the index pattern != rollup index builder.setGroupConfig(ConfigTestHelpers.getGroupConfig().build()); builder.setPageSize(ESTestCase.randomIntBetween(1,10)); if (ESTestCase.randomBoolean()) { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java 
b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index 24dcb323e3d..819a8dfa3fe 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -95,8 +95,8 @@ public class TransportPutRollupJobAction extends TransportMasterNodeAction() { @Override diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java index f22a6c87a3f..e465c7883cf 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java @@ -122,6 +122,37 @@ public class ConfigTests extends ESTestCase { assertThat(e.getMessage(), equalTo("An index pattern is mandatory.")); } + public void testMatchAllIndexPattern() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + job.setIndexPattern("*"); + Exception e = expectThrows(IllegalArgumentException.class, job::build); + assertThat(e.getMessage(), equalTo("Index pattern must not match all indices (as it would match it's own rollup index")); + } + + public void testMatchOwnRollupPatternPrefix() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + job.setIndexPattern("foo-*"); + job.setRollupIndex("foo-rollup"); + Exception e = expectThrows(IllegalArgumentException.class, job::build); + assertThat(e.getMessage(), equalTo("Index pattern would match rollup index name which is not allowed.")); + } + + public void testMatchOwnRollupPatternSuffix() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + job.setIndexPattern("*-rollup"); + job.setRollupIndex("foo-rollup"); + Exception e = 
expectThrows(IllegalArgumentException.class, job::build); + assertThat(e.getMessage(), equalTo("Index pattern would match rollup index name which is not allowed.")); + } + + public void testIndexPatternIdenticalToRollup() { + RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); + job.setIndexPattern("foo"); + job.setRollupIndex("foo"); + Exception e = expectThrows(IllegalArgumentException.class, job::build); + assertThat(e.getMessage(), equalTo("Rollup index may not be the same as the index pattern.")); + } + public void testEmptyRollupIndex() { RollupJobConfig.Builder job = ConfigTestHelpers.getRollupJob("foo"); job.setRollupIndex(""); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index 568a6261cda..717be0d6b25 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -188,4 +188,3 @@ setup: ] } - From 7c05f69c390172f789e7bd7f13af19175cfb36fb Mon Sep 17 00:00:00 2001 From: lcawl Date: Tue, 5 Jun 2018 16:43:55 -0700 Subject: [PATCH 04/22] [DOCS] Creates rest-api folder in docs --- docs/reference/index.asciidoc | 2 +- docs/reference/rest-api/index.asciidoc | 29 ++++++++++++++++++++++++++ x-pack/docs/en/rest-api/index.asciidoc | 29 -------------------------- 3 files changed, 30 insertions(+), 30 deletions(-) create mode 100644 docs/reference/rest-api/index.asciidoc delete mode 100644 x-pack/docs/en/rest-api/index.asciidoc diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 8567ed63b34..b38a554d681 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -63,7 +63,7 @@ include::{xes-repo-dir}/monitoring/index.asciidoc[] include::{xes-repo-dir}/rollup/index.asciidoc[] -include::{xes-repo-dir}/rest-api/index.asciidoc[] +include::rest-api/index.asciidoc[] 
include::{xes-repo-dir}/commands/index.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc new file mode 100644 index 00000000000..8c58246a0a6 --- /dev/null +++ b/docs/reference/rest-api/index.asciidoc @@ -0,0 +1,29 @@ +[role="xpack"] +[[xpack-api]] += {xpack} APIs + +[partintro] +-- +{xpack} exposes REST APIs that are used by the UI components and can be called +directly to configure and access {xpack} features. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +-- + + +include::{xes-repo-dir}/rest-api/info.asciidoc[] +include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] +include::{xes-repo-dir}/rest-api/licensing.asciidoc[] +include::{xes-repo-dir}/rest-api/migration.asciidoc[] +include::{xes-repo-dir}/rest-api/ml-api.asciidoc[] +include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[] +include::{xes-repo-dir}/rest-api/security.asciidoc[] +include::{xes-repo-dir}/rest-api/watcher.asciidoc[] +include::{xes-repo-dir}/rest-api/defs.asciidoc[] diff --git a/x-pack/docs/en/rest-api/index.asciidoc b/x-pack/docs/en/rest-api/index.asciidoc deleted file mode 100644 index 85c72a78d99..00000000000 --- a/x-pack/docs/en/rest-api/index.asciidoc +++ /dev/null @@ -1,29 +0,0 @@ -[role="xpack"] -[[xpack-api]] -= {xpack} APIs - -[partintro] --- -{xpack} exposes REST APIs that are used by the UI components and can be called -directly to configure and access {xpack} features. 
- -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> --- - - -include::info.asciidoc[] -include::graph/explore.asciidoc[] -include::licensing.asciidoc[] -include::migration.asciidoc[] -include::ml-api.asciidoc[] -include::rollup-api.asciidoc[] -include::security.asciidoc[] -include::watcher.asciidoc[] -include::defs.asciidoc[] From 805648848d50b89e4c85d2f35a8de7456c67a694 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 5 Jun 2018 19:56:22 -0400 Subject: [PATCH 05/22] Add check for feature aware implementations (#31081) This commit adds a check that any class in X-Pack that is a feature aware custom also implements the appropriate mix-in interface in X-Pack. These interfaces provide a default implementation of FeatureAware#getRequiredFeature that returns that x-pack is the required feature. By implementing this interface, this gives a consistent way for X-Pack feature aware customs to return the appropriate required feature and this check enforces that all such feature aware customs return the appropriate required feature. 
--- build.gradle | 11 +- x-pack/plugin/build.gradle | 50 ++- x-pack/test/feature-aware/build.gradle | 16 + .../test/feature_aware/FeatureAwareCheck.java | 180 ++++++++++ .../feature_aware/FeatureAwareCheckTests.java | 323 ++++++++++++++++++ 5 files changed, 574 insertions(+), 6 deletions(-) create mode 100644 x-pack/test/feature-aware/build.gradle create mode 100644 x-pack/test/feature-aware/src/main/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheck.java create mode 100644 x-pack/test/feature-aware/src/test/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheckTests.java diff --git a/build.gradle b/build.gradle index 05ad5479e8d..620e043d1c0 100644 --- a/build.gradle +++ b/build.gradle @@ -226,6 +226,7 @@ subprojects { "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:packages:deb', "org.elasticsearch.distribution.deb:elasticsearch-oss:${version}": ':distribution:packages:oss-deb', "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', + "org.elasticsearch.xpack.test:feature-aware:${version}": ':x-pack:test:feature-aware', // for transport client "org.elasticsearch.plugin:transport-netty4-client:${version}": ':modules:transport-netty4', "org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex', @@ -311,7 +312,15 @@ gradle.projectsEvaluated { // :test:framework:test cannot run before and after :server:test return } - configurations.all { + configurations.all { Configuration configuration -> + /* + * The featureAwarePlugin configuration has a dependency on x-pack:plugin:core and x-pack:plugin:core has a dependency on the + * featureAwarePlugin configuration. The below task ordering logic would force :x-pack:plugin:core:test + * :x-pack:test:feature-aware:test to depend on each other circularly. We break that cycle here. 
+ */ + if (configuration.name == "featureAwarePlugin") { + return + } dependencies.all { Dependency dep -> Project upstreamProject = dependencyToProject(dep) if (upstreamProject != null) { diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 4a0b29c4258..ac423c42811 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -1,12 +1,8 @@ import org.elasticsearch.gradle.LoggedExec -import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.test.NodeInfo import java.nio.charset.StandardCharsets -import java.nio.file.Files -import java.nio.file.Path -import java.nio.file.StandardCopyOption -import org.elasticsearch.gradle.test.RunTask; apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -17,6 +13,50 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } +subprojects { + afterEvaluate { + if (project.plugins.hasPlugin(PluginBuildPlugin)) { + // see the root Gradle file for additional logic regarding this configuration + project.configurations.create('featureAwarePlugin') + project.dependencies.add('featureAwarePlugin', project.configurations.compileClasspath) + project.dependencies.add( + 'featureAwarePlugin', + "org.elasticsearch.xpack.test:feature-aware:${org.elasticsearch.gradle.VersionProperties.elasticsearch}") + project.dependencies.add('featureAwarePlugin', project.sourceSets.main.output.getClassesDirs()) + + final Task featureAwareTask = project.tasks.create("featureAwareCheck", LoggedExec) { + description = "Runs FeatureAwareCheck on main classes." 
+ dependsOn project.configurations.featureAwarePlugin + + final File successMarker = new File(project.buildDir, 'markers/featureAware') + outputs.file(successMarker) + + executable = new File(project.runtimeJavaHome, 'bin/java') + + // default to main class files if such a source set exists + final List files = [] + if (project.sourceSets.findByName("main")) { + files.add(project.sourceSets.main.output.classesDir) + dependsOn project.tasks.classes + } + // filter out non-existent classes directories from empty source sets + final FileCollection classDirectories = project.files(files).filter { it.exists() } + + doFirst { + args('-cp', project.configurations.featureAwarePlugin.asPath, 'org.elasticsearch.xpack.test.feature_aware.FeatureAwareCheck') + classDirectories.each { args it.getAbsolutePath() } + } + doLast { + successMarker.parentFile.mkdirs() + successMarker.setText("", 'UTF-8') + } + } + + project.precommit.dependsOn featureAwareTask + } + } +} + // https://github.com/elastic/x-plugins/issues/724 configurations { testArtifacts.extendsFrom testRuntime diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle new file mode 100644 index 00000000000..217ed25a2d4 --- /dev/null +++ b/x-pack/test/feature-aware/build.gradle @@ -0,0 +1,16 @@ +apply plugin: 'elasticsearch.build' + +dependencies { + compile 'org.ow2.asm:asm:6.2' + compile "org.elasticsearch:elasticsearch:${version}" + compile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile "org.elasticsearch.test:framework:${version}" +} + +forbiddenApisMain.enabled = true + +dependencyLicenses.enabled = false + +jarHell.enabled = false + +thirdPartyAudit.enabled = false diff --git a/x-pack/test/feature-aware/src/main/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheck.java b/x-pack/test/feature-aware/src/main/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheck.java new file mode 100644 index 00000000000..7746692b408 --- /dev/null +++ 
b/x-pack/test/feature-aware/src/main/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheck.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.test.feature_aware; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.objectweb.asm.ClassReader; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; +import java.util.function.Consumer; + +/** + * Used in the featureAwareCheck to check for classes in X-Pack that implement customs but do not extend the appropriate marker interface. + */ +public final class FeatureAwareCheck { + + /** + * Check the class directories specified by the arguments for classes in X-Pack that implement customs but do not extend the appropriate + * marker interface that provides a mix-in implementation of {@link ClusterState.FeatureAware#getRequiredFeature()}. 
+ * + * @param args the class directories to check + * @throws IOException if an I/O exception occurs while walking the class directories + */ + public static void main(final String[] args) throws IOException { + systemOutPrintln("checking for custom violations"); + final List violations = new ArrayList<>(); + checkDirectories(violations::add, args); + if (violations.isEmpty()) { + systemOutPrintln("no custom violations found"); + } else { + violations.forEach(violation -> + systemOutPrintln( + "class [" + violation.name + "] implements" + + " [" + violation.interfaceName + "] but does not implement" + + " [" + violation.expectedInterfaceName + "]") + ); + throw new IllegalStateException( + "found custom" + (violations.size() == 1 ? "" : "s") + " in X-Pack not extending appropriate X-Pack mix-in"); + } + } + + @SuppressForbidden(reason = "System.out#println") + private static void systemOutPrintln(final String s) { + System.out.println(s); + } + + private static void checkDirectories( + final Consumer callback, + final String... classDirectories) throws IOException { + for (final String classDirectory : classDirectories) { + final Path root = pathsGet(classDirectory); + if (Files.isDirectory(root)) { + Files.walkFileTree(root, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) { + try (InputStream in = Files.newInputStream(file)) { + checkClass(in, callback); + } + } + return super.visitFile(file, attrs); + } + }); + } else { + throw new FileNotFoundException("class directory [" + classDirectory + "] should exist"); + } + } + } + + @SuppressForbidden(reason = "Paths#get") + private static Path pathsGet(final String pathString) { + return Paths.get(pathString); + } + + /** + * Represents a feature-aware violation. 
+ */ + static class FeatureAwareViolation { + + final String name; + final String interfaceName; + final String expectedInterfaceName; + + /** + * Constructs a representation of a feature-aware violation. + * + * @param name the name of the custom class + * @param interfaceName the name of the feature-aware interface + * @param expectedInterfaceName the name of the expected mix-in class + */ + FeatureAwareViolation(final String name, final String interfaceName, final String expectedInterfaceName) { + this.name = name; + this.interfaceName = interfaceName; + this.expectedInterfaceName = expectedInterfaceName; + } + + } + + /** + * Loads a class from the specified input stream and checks that if it implements a feature-aware custom then it extends the appropriate + * mix-in interface from X-Pack. If the class does not, then the specified callback is invoked. + * + * @param in the input stream + * @param callback the callback to invoke + * @throws IOException if an I/O exception occurs loading the class hierarchy + */ + static void checkClass(final InputStream in, final Consumer callback) throws IOException { + // the class format only reports declared interfaces so we have to walk the hierarchy looking for all interfaces + final List interfaces = new ArrayList<>(); + ClassReader cr = new ClassReader(in); + final String name = cr.getClassName(); + do { + interfaces.addAll(Arrays.asList(cr.getInterfaces())); + final String superName = cr.getSuperName(); + if ("java/lang/Object".equals(superName)) { + break; + } + cr = new ClassReader(superName); + } while (true); + checkClass(name, interfaces, callback); + } + + private static void checkClass( + final String name, + final List interfaces, + final Consumer callback) { + checkCustomForClass(ClusterState.Custom.class, XPackPlugin.XPackClusterStateCustom.class, name, interfaces, callback); + checkCustomForClass(MetaData.Custom.class, XPackPlugin.XPackMetaDataCustom.class, name, interfaces, callback); + 
checkCustomForClass(PersistentTaskParams.class, XPackPlugin.XPackPersistentTaskParams.class, name, interfaces, callback); + } + + private static void checkCustomForClass( + final Class interfaceToCheck, + final Class expectedInterface, + final String name, + final List interfaces, + final Consumer callback) { + final Set interfaceSet = new TreeSet<>(interfaces); + final String interfaceToCheckName = formatClassName(interfaceToCheck); + final String expectedXPackInterfaceName = formatClassName(expectedInterface); + if (interfaceSet.contains(interfaceToCheckName) + && name.equals(expectedXPackInterfaceName) == false + && interfaceSet.contains(expectedXPackInterfaceName) == false) { + assert name.startsWith("org/elasticsearch/license") || name.startsWith("org/elasticsearch/xpack"); + callback.accept(new FeatureAwareViolation(name, interfaceToCheckName, expectedXPackInterfaceName)); + } + } + + /** + * Format the specified class to a name in the ASM format replacing all dots in the class name with forward-slashes. + * + * @param clazz the class whose name to format + * @return the formatted class name + */ + static String formatClassName(final Class clazz) { + return clazz.getName().replace(".", "/"); + } + +} diff --git a/x-pack/test/feature-aware/src/test/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheckTests.java b/x-pack/test/feature-aware/src/test/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheckTests.java new file mode 100644 index 00000000000..2dde9efce42 --- /dev/null +++ b/x-pack/test/feature-aware/src/test/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheckTests.java @@ -0,0 +1,323 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.test.feature_aware; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.equalTo; + +public class FeatureAwareCheckTests extends ESTestCase { + + public void testClusterStateCustomViolation() throws IOException { + runCustomViolationTest( + ClusterStateCustomViolation.class, + getClass(), + ClusterState.Custom.class, + XPackPlugin.XPackClusterStateCustom.class); + } + + public void testClusterStateCustom() throws IOException { + runCustomTest(XPackClusterStateCustom.class, getClass(), ClusterState.Custom.class, XPackPlugin.XPackClusterStateCustom.class); + } + + public void testClusterStateCustomMarkerInterface() throws IOException { + // marker interfaces do not implement the marker interface but should not fail the feature aware check + runCustomTest( + XPackPlugin.XPackClusterStateCustom.class, + XPackPlugin.class, + ClusterState.Custom.class, + XPackPlugin.XPackClusterStateCustom.class); + } + + public void testMetaDataCustomViolation() throws IOException { + runCustomViolationTest(MetaDataCustomViolation.class, getClass(), MetaData.Custom.class, XPackPlugin.XPackMetaDataCustom.class); + } + + public void testMetaDataCustom() throws IOException { + runCustomTest(XPackMetaDataCustom.class, getClass(), MetaData.Custom.class, XPackPlugin.XPackMetaDataCustom.class); + } + + public void testMetaDataCustomMarkerInterface() throws IOException { + // marker 
interfaces do not implement the marker interface but should not fail the feature aware check + runCustomTest( + XPackPlugin.XPackMetaDataCustom.class, + XPackPlugin.class, + MetaData.Custom.class, + XPackPlugin.XPackMetaDataCustom.class); + } + + public void testPersistentTaskParamsViolation() throws IOException { + runCustomViolationTest( + PersistentTaskParamsViolation.class, + getClass(), + PersistentTaskParams.class, + XPackPlugin.XPackPersistentTaskParams.class); + } + + public void testPersistentTaskParams() throws IOException { + runCustomTest(XPackPersistentTaskParams.class, getClass(), PersistentTaskParams.class, XPackPlugin.XPackPersistentTaskParams.class); + } + + public void testPersistentTaskParamsMarkerInterface() throws IOException { + // marker interfaces do not implement the marker interface but should not fail the feature aware check + runCustomTest( + XPackPlugin.XPackPersistentTaskParams.class, + XPackPlugin.class, + PersistentTaskParams.class, + XPackPlugin.XPackPersistentTaskParams.class); + } + + abstract class ClusterStateCustomFeatureAware implements ClusterState.Custom { + + private final String writeableName; + + ClusterStateCustomFeatureAware(final String writeableName) { + this.writeableName = writeableName; + } + + @Override + public Diff diff(ClusterState.Custom previousState) { + return null; + } + + @Override + public String getWriteableName() { + return writeableName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder; + } + + } + + class ClusterStateCustomViolation extends ClusterStateCustomFeatureAware { + + ClusterStateCustomViolation() { + super("cluster_state_custom_violation"); + } + } + + class XPackClusterStateCustom extends 
ClusterStateCustomFeatureAware implements XPackPlugin.XPackClusterStateCustom { + + XPackClusterStateCustom() { + super("x_pack_cluster_state_custom"); + } + + } + + abstract class MetaDataCustomFeatureAware implements MetaData.Custom { + + private final String writeableName; + + MetaDataCustomFeatureAware(final String writeableName) { + this.writeableName = writeableName; + } + + @Override + public EnumSet context() { + return MetaData.ALL_CONTEXTS; + } + + @Override + public Diff diff(MetaData.Custom previousState) { + return null; + } + + @Override + public String getWriteableName() { + return writeableName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder; + } + + } + + class MetaDataCustomViolation extends MetaDataCustomFeatureAware { + + MetaDataCustomViolation() { + super("meta_data_custom_violation"); + } + + } + + class XPackMetaDataCustom extends MetaDataCustomFeatureAware implements XPackPlugin.XPackMetaDataCustom { + + XPackMetaDataCustom() { + super("x_pack_meta_data_custom"); + } + + } + + abstract class PersistentTaskParamsFeatureAware implements PersistentTaskParams { + + private final String writeableName; + + PersistentTaskParamsFeatureAware(final String writeableName) { + this.writeableName = writeableName; + } + + @Override + public String getWriteableName() { + return writeableName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder; + } + + } + + 
class PersistentTaskParamsViolation extends PersistentTaskParamsFeatureAware { + + PersistentTaskParamsViolation() { + super("persistent_task_params_violation"); + } + + } + + class XPackPersistentTaskParams extends PersistentTaskParamsFeatureAware implements XPackPlugin.XPackPersistentTaskParams { + + XPackPersistentTaskParams() { + super("x_pack_persistent_task_params"); + } + + } + + private class FeatureAwareViolationConsumer implements Consumer { + + private final AtomicBoolean called = new AtomicBoolean(); + private final String name; + private final String interfaceName; + private final String expectedInterfaceName; + + FeatureAwareViolationConsumer(final String name, final String interfaceName, final String expectedInterfaceName) { + this.name = name; + this.interfaceName = interfaceName; + this.expectedInterfaceName = expectedInterfaceName; + } + + @Override + public void accept(final org.elasticsearch.xpack.test.feature_aware.FeatureAwareCheck.FeatureAwareViolation featureAwareViolation) { + called.set(true); + assertThat(featureAwareViolation.name, equalTo(name)); + assertThat(featureAwareViolation.interfaceName, equalTo(interfaceName)); + assertThat(featureAwareViolation.expectedInterfaceName, equalTo(expectedInterfaceName)); + } + + } + + /** + * Runs a test on an actual class implementing a custom interface and not the expected marker interface. 
+ * + * @param clazz the custom implementation + * @param outerClazz the outer class to load the custom implementation relative to + * @param interfaceClazz the custom + * @param expectedInterfaceClazz the marker interface + * @throws IOException if an I/O error occurs reading the class + */ + private void runCustomViolationTest( + final Class clazz, + final Class outerClazz, + final Class interfaceClazz, + final Class expectedInterfaceClazz) throws IOException { + runTest(clazz, outerClazz, interfaceClazz, expectedInterfaceClazz, true); + } + + /** + * Runs a test on an actual class implementing a custom interface and the expected marker interface. + * + * @param clazz the custom implementation + * @param outerClazz the outer class to load the custom implementation relative to + * @param interfaceClazz the custom + * @param expectedInterfaceClazz the marker interface + * @throws IOException if an I/O error occurs reading the class + */ + private void runCustomTest( + final Class clazz, + final Class outerClazz, + final Class interfaceClazz, + final Class expectedInterfaceClazz) throws IOException { + runTest(clazz, outerClazz, interfaceClazz, expectedInterfaceClazz, false); + } + + /** + * Runs a test on an actual class implementing a custom interface and should implement the expected marker interface if and only if + * the specified violation parameter is false. 
+ * + * @param clazz the custom implementation + * @param outerClazz the outer class to load the custom implementation relative to + * @param interfaceClazz the custom + * @param expectedInterfaceClazz the marker interface + * @param violation whether or not the actual class is expected to fail the feature aware check + * @throws IOException if an I/O error occurs reading the class + */ + private void runTest( + final Class clazz, + final Class outerClazz, + final Class interfaceClazz, + final Class expectedInterfaceClazz, + final boolean violation) throws IOException { + final String name = clazz.getName(); + final FeatureAwareViolationConsumer callback = + new FeatureAwareViolationConsumer( + FeatureAwareCheck.formatClassName(clazz), + FeatureAwareCheck.formatClassName(interfaceClazz), + FeatureAwareCheck.formatClassName(expectedInterfaceClazz)); + FeatureAwareCheck.checkClass(outerClazz.getResourceAsStream(name.substring(1 + name.lastIndexOf(".")) + ".class"), callback); + assertThat(callback.called.get(), equalTo(violation)); + } + +} From 735d0e671aa71a9c6ecf111eeca4d0355ea8167f Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 6 Jun 2018 07:40:21 +0200 Subject: [PATCH 06/22] Make PreBuiltAnalyzerProviderFactory plugable via AnalysisPlugin and move `finger_print`, `pattern` and `standard_html_strip` analyzers to analysis-common module. (both AnalysisProvider and PreBuiltAnalyzerProvider) Changed PreBuiltAnalyzerProviderFactory to extend from PreConfiguredAnalysisComponent and changed to make sure that predefined analyzers are always instantiated with the current ES version and if an instance is requested for a different version then delegate to PreBuiltCache. This is similar to the behaviour that exists today in AnalysisRegistry.PreBuiltAnalysis and PreBuiltAnalyzerProviderFactory. 
(#31095) Relates to #23658 --- .../analysis/common/CommonAnalysisPlugin.java | 23 ++++ .../analysis/common}/FingerprintAnalyzer.java | 4 +- .../common}/FingerprintAnalyzerProvider.java | 6 +- .../common/FingerprintTokenFilterFactory.java | 6 +- .../analysis/common}/PatternAnalyzer.java | 4 +- .../common}/PatternAnalyzerProvider.java | 6 +- .../common}/StandardHtmlStripAnalyzer.java | 4 +- .../StandardHtmlStripAnalyzerProvider.java | 6 +- .../common}/FingerprintAnalyzerTests.java | 2 +- .../common}/PatternAnalyzerTests.java | 2 +- .../test/analysis-common/20_analyzers.yml | 32 ++++++ .../index/analysis/AnalysisRegistry.java | 28 +++-- .../PreBuiltAnalyzerProviderFactory.java | 102 ++++++++++++++---- .../PreConfiguredAnalysisComponent.java | 9 +- .../indices/analysis/AnalysisModule.java | 20 ++-- .../indices/analysis/PreBuiltAnalyzers.java | 21 +--- .../analysis/PreBuiltCacheFactory.java | 21 ++++ .../elasticsearch/plugins/AnalysisPlugin.java | 8 ++ .../elasticsearch/index/IndexModuleTests.java | 2 +- .../index/analysis/AnalysisRegistryTests.java | 17 ++- .../highlight/HighlighterSearchIT.java | 34 +++++- .../xpack/watcher/WatcherPluginTests.java | 2 +- 22 files changed, 267 insertions(+), 92 deletions(-) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/FingerprintAnalyzer.java (94%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/FingerprintAnalyzerProvider.java (90%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/PatternAnalyzer.java (94%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/PatternAnalyzerProvider.java (88%) rename {server/src/main/java/org/elasticsearch/index/analysis => 
modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/StandardHtmlStripAnalyzer.java (95%) rename {server/src/main/java/org/elasticsearch/index/analysis => modules/analysis-common/src/main/java/org/elasticsearch/analysis/common}/StandardHtmlStripAnalyzerProvider.java (85%) rename {server/src/test/java/org/elasticsearch/index/analysis => modules/analysis-common/src/test/java/org/elasticsearch/analysis/common}/FingerprintAnalyzerTests.java (98%) rename {server/src/test/java/org/elasticsearch/index/analysis => modules/analysis-common/src/test/java/org/elasticsearch/analysis/common}/PatternAnalyzerTests.java (99%) diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 69c8afb3e2f..433bef902c1 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -19,6 +19,7 @@ package org.elasticsearch.analysis.common; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.StopFilter; @@ -79,7 +80,9 @@ import org.apache.lucene.analysis.util.ElisionFilter; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; @@ -87,6 +90,7 @@ import 
org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; +import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.tartarus.snowball.ext.DutchStemmer; @@ -103,6 +107,15 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(CommonAnalysisPlugin.class)); + @Override + public Map>> getAnalyzers() { + Map>> analyzers = new TreeMap<>(); + analyzers.put("fingerprint", FingerprintAnalyzerProvider::new); + analyzers.put("standard_html_strip", StandardHtmlStripAnalyzerProvider::new); + analyzers.put("pattern", PatternAnalyzerProvider::new); + return analyzers; + } + @Override public Map> getTokenFilters() { Map> filters = new TreeMap<>(); @@ -197,6 +210,16 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { return tokenizers; } + @Override + public List getPreBuiltAnalyzerProviderFactories() { + List analyzers = new ArrayList<>(); + analyzers.add(new PreBuiltAnalyzerProviderFactory("standard_html_strip", CachingStrategy.LUCENE, + version -> new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET))); + analyzers.add(new PreBuiltAnalyzerProviderFactory("pattern", CachingStrategy.ELASTICSEARCH, version -> + new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET))); + return analyzers; + } + @Override public List getPreConfiguredCharFilters() { List filters = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java 
b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzer.java similarity index 94% rename from server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzer.java index 0a550f19aa7..d37239304cd 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzer.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; @@ -35,7 +35,7 @@ public final class FingerprintAnalyzer extends Analyzer { private final int maxOutputSize; private final CharArraySet stopWords; - public FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize) { + FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize) { this.separator = separator; this.maxOutputSize = maxOutputSize; this.stopWords = stopWords; diff --git a/server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzerProvider.java similarity index 90% rename from server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzerProvider.java index 6a777e7c931..f54b04bf309 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzerProvider.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; @@ -25,6 +25,8 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; /** @@ -42,7 +44,7 @@ public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final StandardHtmlStripAnalyzer analyzer; - public StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET; CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords); diff --git a/server/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FingerprintAnalyzerTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FingerprintAnalyzerTests.java index c5e854879e9..0933f3bf13a 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FingerprintAnalyzerTests.java @@ -1,4 +1,4 @@ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; /* * Licensed to Elasticsearch under one or more contributor diff --git 
a/server/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java index d80cbf66c34..d2d226d6250 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java @@ -1,4 +1,4 @@ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; /* * Licensed to Elasticsearch under one or more contributor diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml index 6ff3b8c8027..d38f63f5429 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml @@ -37,3 +37,35 @@ analyzer: bengali - length: { tokens: 1 } - match: { tokens.0.token: বার } + +--- +"fingerprint": + - do: + indices.analyze: + body: + text: A1 B2 A1 D4 C3 + analyzer: fingerprint + - length: { tokens: 1 } + - match: { tokens.0.token: a1 b2 c3 d4 } + +--- +"standard_html_strip": + - do: + indices.analyze: + body: + text: + analyzer: standard_html_strip + - length: { tokens: 2 } + - match: { tokens.0.token: bold } + - match: { tokens.1.token: italic } + +--- +"pattern": + - do: + indices.analyze: + body: + text: foo bar + analyzer: pattern + - length: { tokens: 2 } + - match: { tokens.0.token: foo } + - match: { tokens.1.token: bar } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java 
b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index e421a19b2ac..61b5cb91712 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -22,7 +22,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -70,14 +69,16 @@ public final class AnalysisRegistry implements Closeable { Map>> normalizers, Map preConfiguredCharFilters, Map preConfiguredTokenFilters, - Map preConfiguredTokenizers) { + Map preConfiguredTokenizers, + Map preConfiguredAnalyzers) { this.environment = environment; this.charFilters = unmodifiableMap(charFilters); this.tokenFilters = unmodifiableMap(tokenFilters); this.tokenizers = unmodifiableMap(tokenizers); this.analyzers = unmodifiableMap(analyzers); this.normalizers = unmodifiableMap(normalizers); - prebuiltAnalysis = new PrebuiltAnalysis(preConfiguredCharFilters, preConfiguredTokenFilters, preConfiguredTokenizers); + prebuiltAnalysis = + new PrebuiltAnalysis(preConfiguredCharFilters, preConfiguredTokenFilters, preConfiguredTokenizers, preConfiguredAnalyzers); } /** @@ -398,13 +399,15 @@ public final class AnalysisRegistry implements Closeable { private PrebuiltAnalysis( Map preConfiguredCharFilters, Map preConfiguredTokenFilters, - Map preConfiguredTokenizers) { - Map analyzerProviderFactories = new HashMap<>(); + Map preConfiguredTokenizers, + Map preConfiguredAnalyzers) { - // Analyzers + Map analyzerProviderFactories = new HashMap<>(); + analyzerProviderFactories.putAll(preConfiguredAnalyzers); + // Pre-build analyzers for (PreBuiltAnalyzers preBuiltAnalyzerEnum 
: PreBuiltAnalyzers.values()) { String name = preBuiltAnalyzerEnum.name().toLowerCase(Locale.ROOT); - analyzerProviderFactories.put(name, new PreBuiltAnalyzerProviderFactory(name, AnalyzerScope.INDICES, preBuiltAnalyzerEnum.getAnalyzer(Version.CURRENT))); + analyzerProviderFactories.put(name, new PreBuiltAnalyzerProviderFactory(name, preBuiltAnalyzerEnum)); } this.analyzerProviderFactories = Collections.unmodifiableMap(analyzerProviderFactories); @@ -429,17 +432,10 @@ public final class AnalysisRegistry implements Closeable { return analyzerProviderFactories.get(name); } - Analyzer analyzer(String name) { - PreBuiltAnalyzerProviderFactory analyzerProviderFactory = (PreBuiltAnalyzerProviderFactory) analyzerProviderFactories.get(name); - if (analyzerProviderFactory == null) { - return null; - } - return analyzerProviderFactory.analyzer(); - } - @Override public void close() throws IOException { - IOUtils.close(analyzerProviderFactories.values().stream().map((a) -> ((PreBuiltAnalyzerProviderFactory)a).analyzer()).collect(Collectors.toList())); + IOUtils.close(analyzerProviderFactories.values().stream() + .map((a) -> ((PreBuiltAnalyzerProviderFactory)a)).collect(Collectors.toList())); } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index 3e59377ecc2..9317f9fb1e4 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -22,41 +22,101 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -import 
org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.analysis.PreBuiltAnalyzers; +import org.elasticsearch.indices.analysis.PreBuiltCacheFactory; +import java.io.Closeable; import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; -public class PreBuiltAnalyzerProviderFactory implements AnalysisModule.AnalysisProvider> { +public class PreBuiltAnalyzerProviderFactory extends PreConfiguredAnalysisComponent> implements Closeable { - private final PreBuiltAnalyzerProvider analyzerProvider; + private final Function create; + private final PreBuiltAnalyzerProvider current; - public PreBuiltAnalyzerProviderFactory(String name, AnalyzerScope scope, Analyzer analyzer) { - analyzerProvider = new PreBuiltAnalyzerProvider(name, scope, analyzer); + /** + * This constructor only exists to expose analyzers defined in {@link PreBuiltAnalyzers} as {@link PreBuiltAnalyzerProviderFactory}. 
+ */ + PreBuiltAnalyzerProviderFactory(String name, PreBuiltAnalyzers preBuiltAnalyzer) { + super(name, new PreBuiltAnalyzersDelegateCache(name, preBuiltAnalyzer)); + this.create = preBuiltAnalyzer::getAnalyzer; + current = new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, preBuiltAnalyzer.getAnalyzer(Version.CURRENT)); } - public AnalyzerProvider create(String name, Settings settings) { - Version indexVersion = Version.indexCreated(settings); - if (!Version.CURRENT.equals(indexVersion)) { - PreBuiltAnalyzers preBuiltAnalyzers = PreBuiltAnalyzers.getOrDefault(name, null); - if (preBuiltAnalyzers != null) { - Analyzer analyzer = preBuiltAnalyzers.getAnalyzer(indexVersion); - return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer); - } - } - - return analyzerProvider; + public PreBuiltAnalyzerProviderFactory(String name, PreBuiltCacheFactory.CachingStrategy cache, Function create) { + super(name, cache); + this.create = create; + this.current = new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, create.apply(Version.CURRENT)); } @Override - public AnalyzerProvider get(IndexSettings indexSettings, Environment environment, String name, Settings settings) - throws IOException { - return create(name, settings); + public AnalyzerProvider get(IndexSettings indexSettings, + Environment environment, + String name, + Settings settings) throws IOException { + Version versionCreated = Version.indexCreated(settings); + if (Version.CURRENT.equals(versionCreated) == false) { + return super.get(indexSettings, environment, name, settings); + } else { + return current; + } } - public Analyzer analyzer() { - return analyzerProvider.get(); + @Override + protected AnalyzerProvider create(Version version) { + assert Version.CURRENT.equals(version) == false; + return new PreBuiltAnalyzerProvider(getName(), AnalyzerScope.INDICES, create.apply(version)); + } + + @Override + public void close() throws IOException { + List closeables = cache.values().stream() 
+ .map(AnalyzerProvider::get) + .collect(Collectors.toList()); + closeables.add(current.get()); + IOUtils.close(closeables); + } + + /** + * A special cache that closes the gap between PreBuiltAnalyzers and PreBuiltAnalyzerProviderFactory. + * + * This can be removed when all analyzers have been moved away from PreBuiltAnalyzers to + * PreBuiltAnalyzerProviderFactory either in server or analysis-common. + */ + static class PreBuiltAnalyzersDelegateCache implements PreBuiltCacheFactory.PreBuiltCache> { + + private final String name; + private final PreBuiltAnalyzers preBuiltAnalyzer; + + private PreBuiltAnalyzersDelegateCache(String name, PreBuiltAnalyzers preBuiltAnalyzer) { + this.name = name; + this.preBuiltAnalyzer = preBuiltAnalyzer; + } + + @Override + public AnalyzerProvider get(Version version) { + return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, preBuiltAnalyzer.getAnalyzer(version)); + } + + @Override + public void put(Version version, AnalyzerProvider analyzerProvider) { + // No need to put, because we delegate in get() directly to PreBuiltAnalyzers which already caches. + } + + @Override + public Collection> values() { + return preBuiltAnalyzer.getCache().values().stream() + // Wrap the analyzer instance in a PreBuiltAnalyzerProvider, this is what PreBuiltAnalyzerProviderFactory#close expects + // (other caches are not directly caching analyzers, but analyzer provider instead. 
+ .map(analyzer -> new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer)) + .collect(Collectors.toList()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredAnalysisComponent.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredAnalysisComponent.java index fdd525d0c80..f7450c15ee9 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredAnalysisComponent.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredAnalysisComponent.java @@ -33,13 +33,18 @@ import java.io.IOException; */ public abstract class PreConfiguredAnalysisComponent implements AnalysisModule.AnalysisProvider { private final String name; - private final PreBuiltCacheFactory.PreBuiltCache cache; + protected final PreBuiltCacheFactory.PreBuiltCache cache; - protected PreConfiguredAnalysisComponent(String name, PreBuiltCacheFactory.CachingStrategy cache) { + protected PreConfiguredAnalysisComponent(String name, PreBuiltCacheFactory.CachingStrategy cache) { this.name = name; this.cache = PreBuiltCacheFactory.getCache(cache); } + protected PreConfiguredAnalysisComponent(String name, PreBuiltCacheFactory.PreBuiltCache cache) { + this.name = name; + this.cache = cache; + } + @Override public T get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException { Version versionCreated = Version.indexCreated(settings); diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index bc590381c3c..13aaf44c82e 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -43,7 +43,6 @@ import org.elasticsearch.index.analysis.CzechAnalyzerProvider; import org.elasticsearch.index.analysis.DanishAnalyzerProvider; import 
org.elasticsearch.index.analysis.DutchAnalyzerProvider; import org.elasticsearch.index.analysis.EnglishAnalyzerProvider; -import org.elasticsearch.index.analysis.FingerprintAnalyzerProvider; import org.elasticsearch.index.analysis.FinnishAnalyzerProvider; import org.elasticsearch.index.analysis.FrenchAnalyzerProvider; import org.elasticsearch.index.analysis.GalicianAnalyzerProvider; @@ -59,9 +58,9 @@ import org.elasticsearch.index.analysis.KeywordAnalyzerProvider; import org.elasticsearch.index.analysis.LatvianAnalyzerProvider; import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider; import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider; -import org.elasticsearch.index.analysis.PatternAnalyzerProvider; import org.elasticsearch.index.analysis.PersianAnalyzerProvider; import org.elasticsearch.index.analysis.PortugueseAnalyzerProvider; +import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; @@ -73,7 +72,6 @@ import org.elasticsearch.index.analysis.SnowballAnalyzerProvider; import org.elasticsearch.index.analysis.SoraniAnalyzerProvider; import org.elasticsearch.index.analysis.SpanishAnalyzerProvider; import org.elasticsearch.index.analysis.StandardAnalyzerProvider; -import org.elasticsearch.index.analysis.StandardHtmlStripAnalyzerProvider; import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.StopAnalyzerProvider; @@ -122,11 +120,12 @@ public final class AnalysisModule { Map preConfiguredCharFilters = setupPreConfiguredCharFilters(plugins); Map preConfiguredTokenFilters = setupPreConfiguredTokenFilters(plugins); Map preConfiguredTokenizers = setupPreConfiguredTokenizers(plugins); + Map preConfiguredAnalyzers = 
setupPreBuiltAnalyzerProviderFactories(plugins); analysisRegistry = new AnalysisRegistry(environment, charFilters.getRegistry(), tokenFilters.getRegistry(), tokenizers.getRegistry(), analyzers.getRegistry(), normalizers.getRegistry(), - preConfiguredCharFilters, preConfiguredTokenFilters, preConfiguredTokenizers); + preConfiguredCharFilters, preConfiguredTokenFilters, preConfiguredTokenizers, preConfiguredAnalyzers); } HunspellService getHunspellService() { @@ -162,6 +161,16 @@ public final class AnalysisModule { return tokenFilters; } + static Map setupPreBuiltAnalyzerProviderFactories(List plugins) { + NamedRegistry preConfiguredCharFilters = new NamedRegistry<>("pre-built analyzer"); + for (AnalysisPlugin plugin : plugins) { + for (PreBuiltAnalyzerProviderFactory factory : plugin.getPreBuiltAnalyzerProviderFactories()) { + preConfiguredCharFilters.register(factory.getName(), factory); + } + } + return unmodifiableMap(preConfiguredCharFilters.getRegistry()); + } + static Map setupPreConfiguredCharFilters(List plugins) { NamedRegistry preConfiguredCharFilters = new NamedRegistry<>("pre-configured char_filter"); @@ -232,12 +241,10 @@ public final class AnalysisModule { NamedRegistry>> analyzers = new NamedRegistry<>("analyzer"); analyzers.register("default", StandardAnalyzerProvider::new); analyzers.register("standard", StandardAnalyzerProvider::new); - analyzers.register("standard_html_strip", StandardHtmlStripAnalyzerProvider::new); analyzers.register("simple", SimpleAnalyzerProvider::new); analyzers.register("stop", StopAnalyzerProvider::new); analyzers.register("whitespace", WhitespaceAnalyzerProvider::new); analyzers.register("keyword", KeywordAnalyzerProvider::new); - analyzers.register("pattern", PatternAnalyzerProvider::new); analyzers.register("snowball", SnowballAnalyzerProvider::new); analyzers.register("arabic", ArabicAnalyzerProvider::new); analyzers.register("armenian", ArmenianAnalyzerProvider::new); @@ -274,7 +281,6 @@ public final class 
AnalysisModule { analyzers.register("swedish", SwedishAnalyzerProvider::new); analyzers.register("turkish", TurkishAnalyzerProvider::new); analyzers.register("thai", ThaiAnalyzerProvider::new); - analyzers.register("fingerprint", FingerprintAnalyzerProvider::new); analyzers.extractAndRegister(plugins, AnalysisPlugin::getAnalyzers); return analyzers; } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 3c286f7dd5e..18cc247b844 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -61,10 +61,7 @@ import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.elasticsearch.Version; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.index.analysis.PatternAnalyzer; import org.elasticsearch.index.analysis.SnowballAnalyzer; -import org.elasticsearch.index.analysis.StandardHtmlStripAnalyzer; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import java.util.Locale; @@ -141,22 +138,6 @@ public enum PreBuiltAnalyzers { } }, - PATTERN(CachingStrategy.ELASTICSEARCH) { - @Override - protected Analyzer create(Version version) { - return new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET); - } - }, - - STANDARD_HTML_STRIP(CachingStrategy.ELASTICSEARCH) { - @Override - protected Analyzer create(Version version) { - final Analyzer analyzer = new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET); - analyzer.setVersion(version.luceneVersion); - return analyzer; - } - }, - ARABIC { @Override protected Analyzer create(Version version) { @@ -484,7 +465,7 @@ public enum PreBuiltAnalyzers { cache = 
PreBuiltCacheFactory.getCache(cachingStrategy); } - PreBuiltCacheFactory.PreBuiltCache getCache() { + public PreBuiltCacheFactory.PreBuiltCache getCache() { return cache; } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java index 8636e04f20f..22b5a8ffaf4 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java @@ -21,6 +21,8 @@ package org.elasticsearch.indices.analysis; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -36,8 +38,12 @@ public class PreBuiltCacheFactory { public enum CachingStrategy { ONE, LUCENE, ELASTICSEARCH }; public interface PreBuiltCache { + T get(Version version); + void put(Version version, T t); + + Collection values(); } private PreBuiltCacheFactory() {} @@ -71,6 +77,11 @@ public class PreBuiltCacheFactory { public void put(Version version, T model) { this.model = model; } + + @Override + public Collection values() { + return Collections.singleton(model); + } } /** @@ -89,6 +100,11 @@ public class PreBuiltCacheFactory { public void put(Version version, T model) { mapModel.put(version, model); } + + @Override + public Collection values() { + return mapModel.values(); + } } /** @@ -107,5 +123,10 @@ public class PreBuiltCacheFactory { public void put(org.elasticsearch.Version version, T model) { mapModel.put(version.luceneVersion, model); } + + @Override + public Collection values() { + return mapModel.values(); + } } } diff --git a/server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java b/server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java index cc04ed875d9..e740fddc6ec 100644 --- 
a/server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java @@ -28,6 +28,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; @@ -92,6 +93,13 @@ public interface AnalysisPlugin { return emptyMap(); } + /** + * Override to add additional pre-configured {@link Analyzer}s. + */ + default List getPreBuiltAnalyzerProviderFactories() { + return emptyList(); + } + /** * Override to add additional pre-configured {@link CharFilter}s. */ diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 008b05f6a1e..2824b8caca1 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -121,7 +121,7 @@ public class IndexModuleTests extends ESTestCase { index = indexSettings.getIndex(); environment = TestEnvironment.newEnvironment(settings); emptyAnalysisRegistry = new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), - emptyMap(), emptyMap(), emptyMap()); + emptyMap(), emptyMap(), emptyMap(), emptyMap()); threadPool = new TestThreadPool("test"); circuitBreakerService = new NoneCircuitBreakerService(); PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings); diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 
9c0f2b3c7a5..36da9761b97 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.VersionUtils; import java.io.IOException; +import java.util.Collections; import java.util.Map; import static java.util.Collections.emptyMap; @@ -48,6 +49,8 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; public class AnalysisRegistryTests extends ESTestCase { private AnalysisRegistry emptyRegistry; @@ -58,7 +61,7 @@ public class AnalysisRegistryTests extends ESTestCase { private static AnalysisRegistry emptyAnalysisRegistry(Settings settings) { return new AnalysisRegistry(TestEnvironment.newEnvironment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), - emptyMap(), emptyMap(), emptyMap()); + emptyMap(), emptyMap(), emptyMap(), emptyMap()); } private static IndexSettings indexSettingsOfCurrentVersion(Settings.Builder settings) { @@ -224,4 +227,16 @@ public class AnalysisRegistryTests extends ESTestCase { indexAnalyzers.close(); indexAnalyzers.close(); } + + public void testEnsureCloseInvocationProperlyDelegated() throws IOException { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + PreBuiltAnalyzerProviderFactory mock = mock(PreBuiltAnalyzerProviderFactory.class); + AnalysisRegistry registry = new AnalysisRegistry(TestEnvironment.newEnvironment(settings), emptyMap(), emptyMap(), + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), Collections.singletonMap("key", mock)); + + registry.close(); + verify(mock).close(); + 
} } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index d1f91d60e25..717bab12ea5 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -32,6 +34,8 @@ import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -41,6 +45,8 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; @@ -63,6 +69,7 @@ import 
java.util.HashMap; import java.util.Locale; import java.util.Map; +import static java.util.Collections.singletonMap; import static org.elasticsearch.client.Requests.searchRequest; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; @@ -106,7 +113,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class); + return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockWhitespacePlugin.class); } public void testHighlightingWithStoredKeyword() throws IOException { @@ -1599,8 +1606,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { assertAcked(prepareCreate("test") .setSettings(Settings.builder() .put(indexSettings()) - .put("analysis.analyzer.my_analyzer.type", "pattern") - .put("analysis.analyzer.my_analyzer.pattern", "\\s+") + .put("analysis.analyzer.my_analyzer.type", "mock_whitespace") .build()) .addMapping("type", "text", "type=text,analyzer=my_analyzer")); ensureGreen(); @@ -1611,7 +1617,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) .highlighter(new HighlightBuilder().field("text")).execute().actionGet(); - // PatternAnalyzer will throw an exception if it is resetted twice + // Mock tokenizer will throw an exception if it is resetted twice assertHitCount(response, 1L); } @@ -2976,4 +2982,22 @@ public class HighlighterSearchIT extends ESIntegTestCase { assertThat(field.getFragments()[0].string(), equalTo("Hello World")); } } + + public static class MockWhitespacePlugin extends Plugin implements AnalysisPlugin { + + @Override + public Map>> getAnalyzers() { + return singletonMap("mock_whitespace", (indexSettings, environment, name, settings) -> { + return new 
AbstractIndexAnalyzerProvider(indexSettings, name, settings) { + + MockAnalyzer instance = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); + + @Override + public Analyzer get() { + return instance; + } + }; + }); + } + } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index c7c2b59caaa..786aae0c521 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -67,7 +67,7 @@ public class WatcherPluginTests extends ESTestCase { // ensure index module is not called, even if watches index is tried IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(Watch.INDEX, settings); AnalysisRegistry registry = new AnalysisRegistry(TestEnvironment.newEnvironment(settings), emptyMap(), emptyMap(), emptyMap(), - emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); IndexModule indexModule = new IndexModule(indexSettings, registry); // this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it watcher.onIndexModule(indexModule); From e9bd92fc764894445452b362b428aca2358288aa Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Wed, 6 Jun 2018 07:44:01 +0200 Subject: [PATCH 07/22] fixed typo --- .../index/analysis/PreBuiltAnalyzerProviderFactory.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index 9317f9fb1e4..eedff2c349c 100644 --- 
a/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -113,7 +113,7 @@ public class PreBuiltAnalyzerProviderFactory extends PreConfiguredAnalysisCompon public Collection> values() { return preBuiltAnalyzer.getCache().values().stream() // Wrap the analyzer instance in a PreBuiltAnalyzerProvider, this is what PreBuiltAnalyzerProviderFactory#close expects - // (other caches are not directly caching analyzers, but analyzer provider instead. + // (other caches are not directly caching analyzers, but analyzer provider instead) .map(analyzer -> new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer)) .collect(Collectors.toList()); } From 1cee45e768b1b2d37961d5504563971b3bd0991d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 6 Jun 2018 09:53:14 +0200 Subject: [PATCH 08/22] [Docs] Delete superfluous callouts (#31111) Those callouts create rendering problems on the subsequent page.
Closes #30532 --- docs/reference/mapping/params/properties.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/mapping/params/properties.asciidoc b/docs/reference/mapping/params/properties.asciidoc index fa74bffd9d3..e50c0b3ac77 100644 --- a/docs/reference/mapping/params/properties.asciidoc +++ b/docs/reference/mapping/params/properties.asciidoc @@ -78,7 +78,7 @@ GET my_index/_search { "query": { "match": { - "manager.name": "Alice White" <1> + "manager.name": "Alice White" } }, "aggs": { @@ -89,7 +89,7 @@ GET my_index/_search "aggs": { "Employee Ages": { "histogram": { - "field": "employees.age", <2> + "field": "employees.age", "interval": 5 } } From 0c9d4cb4178f55b88f0da1dfc41939405c3a8d00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 6 Jun 2018 09:58:16 +0200 Subject: [PATCH 09/22] Fix expectation on parsing exception (#31108) The structure of the expected exception slightly changed, the change adapts the assertions accordingly. 
Closes #31104 --- .../index/rankeval/RatedRequestsTests.java | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 1be1acb1317..f77951dd58b 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -131,22 +131,19 @@ public class RatedRequestsTests extends ESTestCase { } } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/31104") public void testXContentParsingIsNotLenient() throws IOException { RatedRequest testItem = createTestItem(randomBoolean()); XContentType xContentType = randomFrom(XContentType.values()); BytesReference originalBytes = toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { - Exception exception = expectThrows(Exception.class, () -> RatedRequest.fromXContent(parser)); - if (exception instanceof XContentParseException) { - XContentParseException xcpe = (XContentParseException) exception; - assertThat(xcpe.getCause().getMessage(), containsString("unknown field")); - assertThat(xcpe.getCause().getMessage(), containsString("parser not found")); - } - if (exception instanceof XContentParseException) { + Throwable exception = expectThrows(XContentParseException.class, () -> RatedRequest.fromXContent(parser)); + if (exception.getCause() != null) { assertThat(exception.getMessage(), containsString("[request] failed to parse field")); + exception = exception.getCause(); } + assertThat(exception.getMessage(), containsString("unknown field")); + 
assertThat(exception.getMessage(), containsString("parser not found")); } } From d09d60858a6acdaa794201d0fca0115a4d97cb5d Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 6 Jun 2018 09:32:45 +0100 Subject: [PATCH 10/22] [DOCS] Clarify nested datatype introduction (#31055) --- docs/reference/mapping/types/nested.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 804fb1c6508..238e26bf337 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -2,8 +2,8 @@ === Nested datatype The `nested` type is a specialised version of the <> datatype -that allows arrays of objects to be indexed and queried independently of each -other. +that allows arrays of objects to be indexed in a way that they can be queried +independently of each other. ==== How arrays of objects are flattened From 23d156f02306c0373c0689c0004a53a9e7c1e0ee Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 6 Jun 2018 13:39:02 +0200 Subject: [PATCH 11/22] Move RestGetSettingsAction to RestToXContentListener (#31101) --- .../admin/indices/RestGetSettingsAction.java | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index d9fa50cf941..6dead806042 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -20,23 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import 
org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; public class RestGetSettingsAction extends BaseRestHandler { @@ -68,15 +63,6 @@ public class RestGetSettingsAction extends BaseRestHandler { .names(names); getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local())); getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout())); - - return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(GetSettingsResponse getSettingsResponse, XContentBuilder builder) throws Exception { - getSettingsResponse.toXContent(builder, request); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestToXContentListener<>(channel)); } - } From 0c8c6191816e464c065672d1b4b9cd91b26bb0e5 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Wed, 6 Jun 2018 14:59:04 +0300 Subject: [PATCH 12/22] Fix audit index template upgrade loop (#30779) The Index Audit trail allows the override of the template index settings with settings specified on the conf file. 
A bug will manifest when such conf file settings are specified for templates that need to be upgraded. The bug is an endless upgrade loop because the upgrade, although successful, is not reckoned as such by the upgrade service. --- .../metadata/TemplateUpgradeService.java | 59 +++++-- .../metadata/TemplateUpgradeServiceTests.java | 164 ++++++++++++------ .../security/audit/index/IndexAuditTrail.java | 12 +- .../audit/index/IndexAuditTrailTests.java | 25 +++ 4 files changed, 185 insertions(+), 75 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java index 3bdc949752a..024cc44dd6a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; @@ -32,8 +31,6 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -57,6 +54,7 @@ import java.util.HashSet; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.UnaryOperator; @@ -74,7 +72,7 @@ public class TemplateUpgradeService extends 
AbstractComponent implements Cluster public final Client client; - private final AtomicInteger updatesInProgress = new AtomicInteger(); + final AtomicInteger upgradesInProgress = new AtomicInteger(); private ImmutableOpenMap lastTemplateMetaData; @@ -103,8 +101,8 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster return; } - if (updatesInProgress.get() > 0) { - // we are already running some updates - skip this cluster state update + if (upgradesInProgress.get() > 0) { + // we are already running some upgrades - skip this cluster state update return; } @@ -124,7 +122,7 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster lastTemplateMetaData = templates; Optional, Set>> changes = calculateTemplateChanges(templates); if (changes.isPresent()) { - if (updatesInProgress.compareAndSet(0, changes.get().v1().size() + changes.get().v2().size())) { + if (upgradesInProgress.compareAndSet(0, changes.get().v1().size() + changes.get().v2().size() + 1)) { logger.info("Starting template upgrade to version {}, {} templates will be updated and {} will be removed", Version.CURRENT, changes.get().v1().size(), @@ -133,13 +131,14 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { threadContext.markAsSystemContext(); - threadPool.generic().execute(() -> updateTemplates(changes.get().v1(), changes.get().v2())); + threadPool.generic().execute(() -> upgradeTemplates(changes.get().v1(), changes.get().v2())); } } } } - void updateTemplates(Map changes, Set deletions) { + void upgradeTemplates(Map changes, Set deletions) { + final AtomicBoolean anyUpgradeFailed = new AtomicBoolean(false); if (threadPool.getThreadContext().isSystemContext() == false) { throw new IllegalStateException("template updates from the template upgrade service should always happen in a 
system context"); } @@ -151,20 +150,18 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster client.admin().indices().putTemplate(request, new ActionListener() { @Override public void onResponse(PutIndexTemplateResponse response) { - if (updatesInProgress.decrementAndGet() == 0) { - logger.info("Finished upgrading templates to version {}", Version.CURRENT); - } if (response.isAcknowledged() == false) { + anyUpgradeFailed.set(true); logger.warn("Error updating template [{}], request was not acknowledged", change.getKey()); } + tryFinishUpgrade(anyUpgradeFailed); } @Override public void onFailure(Exception e) { - if (updatesInProgress.decrementAndGet() == 0) { - logger.info("Templates were upgraded to version {}", Version.CURRENT); - } + anyUpgradeFailed.set(true); logger.warn(new ParameterizedMessage("Error updating template [{}]", change.getKey()), e); + tryFinishUpgrade(anyUpgradeFailed); } }); } @@ -175,27 +172,51 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster client.admin().indices().deleteTemplate(request, new ActionListener() { @Override public void onResponse(DeleteIndexTemplateResponse response) { - updatesInProgress.decrementAndGet(); if (response.isAcknowledged() == false) { + anyUpgradeFailed.set(true); logger.warn("Error deleting template [{}], request was not acknowledged", template); } + tryFinishUpgrade(anyUpgradeFailed); } @Override public void onFailure(Exception e) { - updatesInProgress.decrementAndGet(); + anyUpgradeFailed.set(true); if (e instanceof IndexTemplateMissingException == false) { // we might attempt to delete the same template from different nodes - so that's ok if template doesn't exist // otherwise we need to warn logger.warn(new ParameterizedMessage("Error deleting template [{}]", template), e); } + tryFinishUpgrade(anyUpgradeFailed); } }); } } - int getUpdatesInProgress() { - return updatesInProgress.get(); + void tryFinishUpgrade(AtomicBoolean anyUpgradeFailed) { + 
assert upgradesInProgress.get() > 0; + if (upgradesInProgress.decrementAndGet() == 1) { + try { + // this is the last upgrade, the templates should now be in the desired state + if (anyUpgradeFailed.get()) { + logger.info("Templates were partially upgraded to version {}", Version.CURRENT); + } else { + logger.info("Templates were upgraded successfuly to version {}", Version.CURRENT); + } + // Check upgraders are satisfied after the update completed. If they still + // report that changes are required, this might indicate a bug or that something + // else tinkering with the templates during the upgrade. + final ImmutableOpenMap upgradedTemplates = + clusterService.state().getMetaData().getTemplates(); + final boolean changesRequired = calculateTemplateChanges(upgradedTemplates).isPresent(); + if (changesRequired) { + logger.warn("Templates are still reported as out of date after the upgrade. The template upgrade will be retried."); + } + } finally { + final int noMoreUpgrades = upgradesInProgress.decrementAndGet(); + assert noMoreUpgrades == 0; + } + } } Optional, Set>> calculateTemplateChanges( diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java index e46f2e06fe1..9ad4aeb69fb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -35,12 +35,16 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import 
org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; @@ -52,13 +56,16 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.IntStream; import static java.util.Collections.emptyMap; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.CoreMatchers.startsWith; @@ -75,8 +82,20 @@ import static org.mockito.Mockito.when; public class TemplateUpgradeServiceTests extends ESTestCase { - private final ClusterService clusterService = new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, Collections.emptyMap()); + private ThreadPool threadPool; + private ClusterService clusterService; + + @Before + public void setUpTest() throws Exception { + threadPool = new TestThreadPool("TemplateUpgradeServiceTests"); + clusterService = createClusterService(threadPool); + } + + @After + public void tearDownTest() throws Exception { + threadPool.shutdownNow(); + clusterService.close(); + } public void testCalculateChangesAddChangeAndDelete() { @@ -90,7 +109,7 @@ public class TemplateUpgradeServiceTests extends 
ESTestCase { IndexTemplateMetaData.builder("changed_test_template").patterns(randomIndexPatterns()).build() ); - TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null, + final TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, threadPool, Arrays.asList( templates -> { if (shouldAdd) { @@ -190,18 +209,18 @@ public class TemplateUpgradeServiceTests extends ESTestCase { additions.put("add_template_" + i, new BytesArray("{\"index_patterns\" : \"*\", \"order\" : " + i + "}")); } - ThreadPool threadPool = mock(ThreadPool.class); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - when(threadPool.getThreadContext()).thenReturn(threadContext); - TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, + final TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, Collections.emptyList()); - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> service.updateTemplates(additions, deletions)); + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> service.upgradeTemplates(additions, deletions)); assertThat(ise.getMessage(), containsString("template upgrade service should always happen in a system context")); - threadContext.markAsSystemContext(); - service.updateTemplates(additions, deletions); - int updatesInProgress = service.getUpdatesInProgress(); + service.upgradesInProgress.set(additionsCount + deletionsCount + 2); // +2 to skip tryFinishUpgrade + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + threadContext.markAsSystemContext(); + service.upgradeTemplates(additions, deletions); + } assertThat(putTemplateListeners, hasSize(additionsCount)); assertThat(deleteTemplateListeners, hasSize(deletionsCount)); 
@@ -218,30 +237,34 @@ public class TemplateUpgradeServiceTests extends ESTestCase { for (int i = 0; i < deletionsCount; i++) { if (randomBoolean()) { - int prevUpdatesInProgress = service.getUpdatesInProgress(); + int prevUpdatesInProgress = service.upgradesInProgress.get(); deleteTemplateListeners.get(i).onFailure(new RuntimeException("test - ignore")); - assertThat(prevUpdatesInProgress - service.getUpdatesInProgress(), equalTo(1)); + assertThat(prevUpdatesInProgress - service.upgradesInProgress.get(), equalTo(1)); } else { - int prevUpdatesInProgress = service.getUpdatesInProgress(); + int prevUpdatesInProgress = service.upgradesInProgress.get(); deleteTemplateListeners.get(i).onResponse(new DeleteIndexTemplateResponse(randomBoolean()) { }); - assertThat(prevUpdatesInProgress - service.getUpdatesInProgress(), equalTo(1)); + assertThat(prevUpdatesInProgress - service.upgradesInProgress.get(), equalTo(1)); } } - assertThat(updatesInProgress - service.getUpdatesInProgress(), equalTo(additionsCount + deletionsCount)); + // tryFinishUpgrade was skipped + assertThat(service.upgradesInProgress.get(), equalTo(2)); } private static final Set MASTER_DATA_ROLES = Collections.unmodifiableSet(EnumSet.of(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA)); @SuppressWarnings("unchecked") - public void testClusterStateUpdate() { + public void testClusterStateUpdate() throws InterruptedException { - AtomicReference> addedListener = new AtomicReference<>(); - AtomicReference> changedListener = new AtomicReference<>(); - AtomicReference> removedListener = new AtomicReference<>(); - AtomicInteger updateInvocation = new AtomicInteger(); + final AtomicReference> addedListener = new AtomicReference<>(); + final AtomicReference> changedListener = new AtomicReference<>(); + final AtomicReference> removedListener = new AtomicReference<>(); + final Semaphore updateInvocation = new Semaphore(0); + final Semaphore calculateInvocation = new Semaphore(0); + final Semaphore changedInvocation 
= new Semaphore(0); + final Semaphore finishInvocation = new Semaphore(0); MetaData metaData = randomMetaData( IndexTemplateMetaData.builder("user_template").patterns(randomIndexPatterns()).build(), @@ -249,21 +272,6 @@ public class TemplateUpgradeServiceTests extends ESTestCase { IndexTemplateMetaData.builder("changed_test_template").patterns(randomIndexPatterns()).build() ); - ThreadPool threadPool = mock(ThreadPool.class); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - when(threadPool.getThreadContext()).thenReturn(threadContext); - ExecutorService executorService = mock(ExecutorService.class); - when(threadPool.generic()).thenReturn(executorService); - doAnswer(invocation -> { - Object[] args = invocation.getArguments(); - assert args.length == 1; - assertTrue(threadContext.isSystemContext()); - Runnable runnable = (Runnable) args[0]; - runnable.run(); - updateInvocation.incrementAndGet(); - return null; - }).when(executorService).execute(any(Runnable.class)); - Client mockClient = mock(Client.class); AdminClient mockAdminClient = mock(AdminClient.class); IndicesAdminClient mockIndicesAdminClient = mock(IndicesAdminClient.class); @@ -293,7 +301,7 @@ public class TemplateUpgradeServiceTests extends ESTestCase { return null; }).when(mockIndicesAdminClient).deleteTemplate(any(DeleteIndexTemplateRequest.class), any(ActionListener.class)); - TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, + final TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, Arrays.asList( templates -> { assertNull(templates.put("added_test_template", IndexTemplateMetaData.builder("added_test_template") @@ -309,26 +317,63 @@ public class TemplateUpgradeServiceTests extends ESTestCase { .patterns(Collections.singletonList("*")).order(10).build())); return templates; } - )); + )) { + + @Override + void tryFinishUpgrade(AtomicBoolean 
anyUpgradeFailed) { + super.tryFinishUpgrade(anyUpgradeFailed); + finishInvocation.release(); + } + + @Override + void upgradeTemplates(Map changes, Set deletions) { + super.upgradeTemplates(changes, deletions); + updateInvocation.release(); + } + + @Override + Optional, Set>> + calculateTemplateChanges(ImmutableOpenMap templates) { + final Optional, Set>> ans = super.calculateTemplateChanges(templates); + calculateInvocation.release(); + return ans; + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + super.clusterChanged(event); + changedInvocation.release(); + } + }; ClusterState prevState = ClusterState.EMPTY_STATE; ClusterState state = ClusterState.builder(prevState).nodes(DiscoveryNodes.builder() .add(new DiscoveryNode("node1", "node1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT) ).localNodeId("node1").masterNodeId("node1").build() ).metaData(metaData).build(); - service.clusterChanged(new ClusterChangedEvent("test", state, prevState)); + setState(clusterService, state); - assertThat(updateInvocation.get(), equalTo(1)); + changedInvocation.acquire(); + assertThat(changedInvocation.availablePermits(), equalTo(0)); + calculateInvocation.acquire(); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + updateInvocation.acquire(); + assertThat(updateInvocation.availablePermits(), equalTo(0)); + assertThat(finishInvocation.availablePermits(), equalTo(0)); assertThat(addedListener.get(), notNullValue()); assertThat(changedListener.get(), notNullValue()); assertThat(removedListener.get(), notNullValue()); prevState = state; state = ClusterState.builder(prevState).metaData(MetaData.builder(state.metaData()).removeTemplate("user_template")).build(); - service.clusterChanged(new ClusterChangedEvent("test 2", state, prevState)); + setState(clusterService, state); // Make sure that update wasn't invoked since we are still running - assertThat(updateInvocation.get(), equalTo(1)); + 
changedInvocation.acquire(); + assertThat(changedInvocation.availablePermits(), equalTo(0)); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + assertThat(updateInvocation.availablePermits(), equalTo(0)); + assertThat(finishInvocation.availablePermits(), equalTo(0)); addedListener.getAndSet(null).onResponse(new PutIndexTemplateResponse(true) { }); @@ -337,19 +382,40 @@ public class TemplateUpgradeServiceTests extends ESTestCase { removedListener.getAndSet(null).onResponse(new DeleteIndexTemplateResponse(true) { }); - service.clusterChanged(new ClusterChangedEvent("test 3", state, prevState)); + // 3 upgrades should be completed, in addition to the final calculate + finishInvocation.acquire(3); + assertThat(finishInvocation.availablePermits(), equalTo(0)); + calculateInvocation.acquire(); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + + setState(clusterService, state); // Make sure that update was called this time since we are no longer running - assertThat(updateInvocation.get(), equalTo(2)); + changedInvocation.acquire(); + assertThat(changedInvocation.availablePermits(), equalTo(0)); + calculateInvocation.acquire(); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + updateInvocation.acquire(); + assertThat(updateInvocation.availablePermits(), equalTo(0)); + assertThat(finishInvocation.availablePermits(), equalTo(0)); addedListener.getAndSet(null).onFailure(new RuntimeException("test - ignore")); changedListener.getAndSet(null).onFailure(new RuntimeException("test - ignore")); removedListener.getAndSet(null).onFailure(new RuntimeException("test - ignore")); - service.clusterChanged(new ClusterChangedEvent("test 3", state, prevState)); + finishInvocation.acquire(3); + assertThat(finishInvocation.availablePermits(), equalTo(0)); + calculateInvocation.acquire(); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + + setState(clusterService, state); // Make sure that update wasn't called this time 
since the index template metadata didn't change - assertThat(updateInvocation.get(), equalTo(2)); + changedInvocation.acquire(); + assertThat(changedInvocation.availablePermits(), equalTo(0)); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + assertThat(updateInvocation.availablePermits(), equalTo(0)); + assertThat(finishInvocation.availablePermits(), equalTo(0)); } private static final int NODE_TEST_ITERS = 100; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java index db7475a8972..1976722d65f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java @@ -992,24 +992,22 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail, Cl } public static Settings customAuditIndexSettings(Settings nodeSettings, Logger logger) { - Settings newSettings = Settings.builder() + final Settings newSettings = Settings.builder() .put(INDEX_SETTINGS.get(nodeSettings), false) + .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX) .build(); if (newSettings.names().isEmpty()) { return Settings.EMPTY; } - // Filter out forbidden settings: - Settings.Builder builder = Settings.builder(); - builder.put(newSettings.filter(k -> { - String name = "index." + k; + // Filter out forbidden setting + return Settings.builder().put(newSettings.filter(name -> { if (FORBIDDEN_INDEX_SETTING.equals(name)) { logger.warn("overriding the default [{}} setting is forbidden. 
ignoring...", name); return false; } return true; - })); - return builder.build(); + })).build(); } private void putTemplate(Settings customSettings, Consumer consumer) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java index dab3d023f65..bc27e4cde40 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java @@ -6,10 +6,14 @@ package org.elasticsearch.xpack.security.audit.index; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; @@ -17,6 +21,8 @@ import org.elasticsearch.client.Requests; import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import 
org.elasticsearch.cluster.service.ClusterService; @@ -29,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.SearchHit; @@ -70,7 +77,9 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.function.Function; +import static java.util.Collections.emptyMap; import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; import static org.elasticsearch.test.InternalTestCluster.clusterName; @@ -85,6 +94,7 @@ import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.hasSize; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -360,6 +370,21 @@ public class IndexAuditTrailTests extends SecurityIntegTestCase { auditor.start(); } + public void testIndexTemplateUpgrader() throws Exception { + final MetaDataUpgrader metaDataUpgrader = internalCluster().getInstance(MetaDataUpgrader.class); + final Map updatedTemplates = metaDataUpgrader.indexTemplateMetaDataUpgraders.apply(emptyMap()); + final IndexTemplateMetaData indexAuditTrailTemplate = updatedTemplates.get(IndexAuditTrail.INDEX_TEMPLATE_NAME); + assertThat(indexAuditTrailTemplate, notNullValue()); + // test custom index settings override template + assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexAuditTrailTemplate.settings()), is(numReplicas)); + assertThat(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexAuditTrailTemplate.settings()), is(numShards)); + // 
test upgrade template and installed template are equal + final GetIndexTemplatesRequest request = new GetIndexTemplatesRequest(IndexAuditTrail.INDEX_TEMPLATE_NAME); + final GetIndexTemplatesResponse response = client().admin().indices().getTemplates(request).get(); + assertThat(response.getIndexTemplates(), hasSize(1)); + assertThat(indexAuditTrailTemplate, is(response.getIndexTemplates().get(0))); + } + public void testProcessorsSetting() { final boolean explicitProcessors = randomBoolean(); final int processors; From 3767bdc98d974282e4b759fe9a072b0d188fe4f2 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 6 Jun 2018 13:21:16 +0100 Subject: [PATCH 13/22] [ML][DOCS] Add example of top N derivative aggregation (#31109) Add example of top N derivative aggregation to the ML datafeed docs --- x-pack/docs/build.gradle | 53 ++++++++++- x-pack/docs/en/ml/aggregations.asciidoc | 112 ++++++++++++++++++------ 2 files changed, 136 insertions(+), 29 deletions(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 17e0f2b70fd..3d799c8d0b5 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -16,7 +16,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/ml/functions/rare.asciidoc', 'en/ml/functions/sum.asciidoc', 'en/ml/functions/time.asciidoc', - 'en/ml/aggregations.asciidoc', 'en/ml/customurl.asciidoc', 'en/monitoring/indices.asciidoc', 'en/rest-api/security/ssl.asciidoc', @@ -281,6 +280,58 @@ setups['library'] = ''' {"name": "The Moon is a Harsh Mistress", "author": "Robert A. 
Heinlein", "release_date": "1966-04-01", "page_count": 288} ''' +setups['farequote_index'] = ''' + - do: + indices.create: + index: farequote + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + time: + type: date + responsetime: + type: float + airline: + type: keyword +''' +setups['farequote_data'] = setups['farequote_index'] + ''' + - do: + bulk: + index: farequote + type: metric + refresh: true + body: | + {"index": {"_id":"1"}} + {"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000"} + {"index": {"_id":"2"}} + {"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000"} + {"index": {"_id":"3"}} + {"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000"} +''' +setups['farequote_job'] = setups['farequote_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "farequote" + body: > + { + "analysis_config": { + "bucket_span": "60m", + "detectors": [{ + "function": "mean", + "field_name": "responsetime", + "by_field_name": "airline" + }], + "summary_count_field_name": "doc_count" + }, + "data_description": { + "time_field": "time" + } + } +''' setups['server_metrics_index'] = ''' - do: indices.create: diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/x-pack/docs/en/ml/aggregations.asciidoc index cc98a45d11e..f3b8e6b3e34 100644 --- a/x-pack/docs/en/ml/aggregations.asciidoc +++ b/x-pack/docs/en/ml/aggregations.asciidoc @@ -11,11 +11,12 @@ aggregated data into {xpackml} instead of raw results, which reduces the volume of data that must be considered while detecting anomalies. There are some limitations to using aggregations in {dfeeds}, however. -Your aggregation must include a buckets aggregation, which in turn must contain -a date histogram aggregation. This requirement ensures that the aggregated -data is a time series. 
If you use a terms aggregation and the cardinality of a -term is high, then the aggregation might not be effective and you might want -to just use the default search and scroll behavior. +Your aggregation must include a `date_histogram` aggregation, which in turn must +contain a `max` aggregation on the time field. This requirement ensures that the +aggregated data is a time series and the timestamp of each bucket is the time +of the last record in the bucket. If you use a terms aggregation and the +cardinality of a term is high, then the aggregation might not be effective and +you might want to just use the default search and scroll behavior. When you create or update a job, you can include the names of aggregations, for example: @@ -27,9 +28,9 @@ PUT _xpack/ml/anomaly_detectors/farequote "analysis_config": { "bucket_span": "60m", "detectors": [{ - "function":"mean", - "field_name":"responsetime", - "by_field_name":"airline" + "function": "mean", + "field_name": "responsetime", + "by_field_name": "airline" }], "summary_count_field_name": "doc_count" }, @@ -38,6 +39,8 @@ PUT _xpack/ml/anomaly_detectors/farequote } } ---------------------------------- +// CONSOLE +// TEST[setup:farequote_data] In this example, the `airline`, `responsetime`, and `time` fields are aggregations. @@ -85,7 +88,8 @@ PUT _xpack/ml/datafeeds/datafeed-farequote } } ---------------------------------- - +// CONSOLE +// TEST[setup:farequote_job] In this example, the aggregations have names that match the fields that they operate on. That is to say, the `max` aggregation is named `time` and its @@ -100,35 +104,86 @@ For all other aggregations, if the aggregation name doesn't match the field name there are limitations in the drill-down functionality within the {ml} page in {kib}. +{dfeeds} support complex nested aggregations, this example uses the `derivative` +pipeline aggregation to find the 1st order derivative of the counter +`system.network.out.bytes` for each value of the field `beat.name`. 
+ +[source,js] +---------------------------------- +"aggregations": { + "beat.name": { + "terms": { + "field": "beat.name" + }, + "aggregations": { + "buckets": { + "date_histogram": { + "field": "@timestamp", + "interval": "5m" + }, + "aggregations": { + "@timestamp": { + "max": { + "field": "@timestamp" + } + }, + "bytes_out_average": { + "avg": { + "field": "system.network.out.bytes" + } + }, + "bytes_out_derivative": { + "derivative": { + "buckets_path": "bytes_out_average" + } + } + } + } + } + } +} +---------------------------------- +// NOTCONSOLE + When you define an aggregation in a {dfeed}, it must have the following form: [source,js] ---------------------------------- -"aggregations" : { - "buckets" : { - "date_histogram" : { - "time_zone": "UTC", ... +"aggregations": { + ["bucketing_aggregation": { + "bucket_agg": { + ... }, - "aggregations": { - "": { - "max": { - "field":"" + "aggregations": {] + "data_histogram_aggregation": { + "date_histogram": { + "field": "time", + }, + "aggregations": { + "timestamp": { + "max": { + "field": "time" + } + }, + [,"": { + "terms":{... + } + [,"aggregations" : { + []+ + } ] + }] } } - [,"": { - "terms":{... - } - [,"aggregations" : { - []+ - } ] - }] - } - } + } + } } ---------------------------------- +// NOTCONSOLE -You must specify `buckets` as the aggregation name and `date_histogram` as the -aggregation type. For more information, see +The top level aggregation must be either a {ref}/search-aggregations-bucket.html[Bucket Aggregation] +containing as single sub-aggregation that is a `date_histogram` or the top level aggregation +is the required `date_histogram`. There must be exactly 1 `date_histogram` aggregation. +For more information, see {ref}/search-aggregations-bucket-datehistogram-aggregation.html[Date Histogram Aggregation]. 
NOTE: The `time_zone` parameter in the date histogram aggregation must be set to `UTC`, @@ -163,6 +218,7 @@ GET .../_search { } } -------------------------------------------------- +// NOTCONSOLE By default, {es} limits the maximum number of terms returned to 10000. For high cardinality fields, the query might not run. It might return errors related to From a9af5ca6387cc0fa6d4e1425f57034d94713a328 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 6 Jun 2018 14:32:37 +0200 Subject: [PATCH 14/22] [TEST] Reenable UnicastZenPingTests.testSimplePings --- .../discovery/zen/UnicastZenPingTests.java | 62 ++++++++++++------- 1 file changed, 40 insertions(+), 22 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index f209f771ab0..f71ffe28b50 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -94,6 +95,7 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; @@ -137,7 +139,6 @@ public class UnicastZenPingTests extends ESTestCase { private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList; - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28685") public void 
testSimplePings() throws IOException, InterruptedException, ExecutionException { // use ephemeral ports final Settings settings = Settings.builder().put("cluster.name", "test").put(TcpTransport.PORT.getKey(), 0).build(); @@ -233,9 +234,9 @@ public class UnicastZenPingTests extends ESTestCase { ZenPing.PingResponse ping = pingResponses.iterator().next(); assertThat(ping.node().getId(), equalTo("UZP_B")); assertThat(ping.getClusterStateVersion(), equalTo(state.version())); - assertPingCount(handleA, handleB, 3); - assertPingCount(handleA, handleC, 0); // mismatch, shouldn't ping - assertPingCount(handleA, handleD, 0); // mismatch, shouldn't ping + assertPings(handleA, handleB); + assertNoPings(handleA, handleC); // mismatch, shouldn't ping + assertNoPings(handleA, handleD); // mismatch, shouldn't ping // ping again, this time from B, logger.info("ping from UZP_B"); @@ -244,23 +245,23 @@ public class UnicastZenPingTests extends ESTestCase { ping = pingResponses.iterator().next(); assertThat(ping.node().getId(), equalTo("UZP_A")); assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION)); - assertPingCount(handleB, handleA, 3); - assertPingCount(handleB, handleC, 0); // mismatch, shouldn't ping - assertPingCount(handleB, handleD, 0); // mismatch, shouldn't ping + assertPings(handleB, handleA); + assertNoPings(handleB, handleC); // mismatch, shouldn't ping + assertNoPings(handleB, handleD); // mismatch, shouldn't ping logger.info("ping from UZP_C"); pingResponses = zenPingC.pingAndWait().toList(); assertThat(pingResponses.size(), equalTo(1)); - assertPingCount(handleC, handleA, 0); - assertPingCount(handleC, handleB, 0); - assertPingCount(handleC, handleD, 3); + assertNoPings(handleC, handleA); + assertNoPings(handleC, handleB); + assertPings(handleC, handleD); logger.info("ping from UZP_D"); pingResponses = zenPingD.pingAndWait().toList(); assertThat(pingResponses.size(), equalTo(1)); - assertPingCount(handleD, 
handleA, 0); - assertPingCount(handleD, handleB, 0); - assertPingCount(handleD, handleC, 3); + assertNoPings(handleD, handleA); + assertNoPings(handleD, handleB); + assertPings(handleD, handleC); zenPingC.close(); handleD.counters.clear(); @@ -268,9 +269,9 @@ public class UnicastZenPingTests extends ESTestCase { pingResponses = zenPingD.pingAndWait().toList(); // check that node does not respond to pings anymore after the ping service has been closed assertThat(pingResponses.size(), equalTo(0)); - assertPingCount(handleD, handleA, 0); - assertPingCount(handleD, handleB, 0); - assertPingCount(handleD, handleC, 3); + assertNoPings(handleD, handleA); + assertNoPings(handleD, handleB); + assertPings(handleD, handleC); } public void testUnknownHostNotCached() throws ExecutionException, InterruptedException { @@ -353,8 +354,8 @@ public class UnicastZenPingTests extends ESTestCase { ZenPing.PingResponse ping = pingResponses.iterator().next(); assertThat(ping.node().getId(), equalTo("UZP_C")); assertThat(ping.getClusterStateVersion(), equalTo(state.version())); - assertPingCount(handleA, handleB, 0); - assertPingCount(handleA, handleC, 3); + assertNoPings(handleA, handleB); + assertPings(handleA, handleC); assertNull(handleA.counters.get(handleB.address)); } @@ -377,8 +378,8 @@ public class UnicastZenPingTests extends ESTestCase { assertThat(secondPingResponses.size(), equalTo(2)); final Set ids = new HashSet<>(secondPingResponses.stream().map(p -> p.node().getId()).collect(Collectors.toList())); assertThat(ids, equalTo(new HashSet<>(Arrays.asList("UZP_B", "UZP_C")))); - assertPingCount(handleA, handleB, 3); - assertPingCount(handleA, handleC, 3); + assertPings(handleA, handleB); + assertPings(handleA, handleC); } } @@ -745,13 +746,30 @@ public class UnicastZenPingTests extends ESTestCase { verify(logger).warn(eq("failed to resolve host [127.0.0.1:9300:9300]"), Matchers.any(ExecutionException.class)); } - private void assertPingCount(final NetworkHandle fromNode, final 
NetworkHandle toNode, int expectedCount) { + private void assertNoPings(final NetworkHandle fromNode, final NetworkHandle toNode) { final AtomicInteger counter = fromNode.counters.getOrDefault(toNode.address, new AtomicInteger()); final String onNodeName = fromNode.node.getName(); assertNotNull("handle for [" + onNodeName + "] has no 'expected' counter", counter); final String forNodeName = toNode.node.getName(); assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", - counter.get(), equalTo(expectedCount)); + counter.get(), equalTo(0)); + } + + private void assertPings(final NetworkHandle fromNode, final NetworkHandle toNode) { + final AtomicInteger counter = fromNode.counters.getOrDefault(toNode.address, new AtomicInteger()); + final String onNodeName = fromNode.node.getName(); + assertNotNull("handle for [" + onNodeName + "] has no 'expected' counter", counter); + final String forNodeName = toNode.node.getName(); + if (Constants.WINDOWS) { + // Some of the ping attempts seem to sporadically fail on Windows (see https://github.com/elastic/elasticsearch/issues/28685) + // Anyhow, the point of the test is not to assert the exact number of pings, but to check if pinging has taken place or not + assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", + counter.get(), greaterThan(0)); + } else { + assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", + counter.get(), equalTo(3)); + } + } private NetworkHandle startServices( From f4a412fe21b2ae595306e6bd261f5fc4d6374388 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 6 Jun 2018 16:13:02 +0200 Subject: [PATCH 15/22] Remove RestGetAllMappingsAction (#31129) We currently have a specific REST action to retrieve all indices and types mappings, which used internally the get index API. 
This doesn't seem to be required anymore though as the existing RestGetMappingAction could as well take the requests with no indices and types specified. This commit removes the RestGetAllMappingsAction in favour of using RestGetMappingAction also for requests that don't specify indices nor types. --- .../elasticsearch/action/ActionModule.java | 12 +- .../indices/RestGetAllMappingsAction.java | 109 ------------------ .../admin/indices/RestGetMappingAction.java | 5 +- 3 files changed, 7 insertions(+), 119 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index fa4d751a54a..235effdcf44 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -206,6 +206,10 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.persistent.CompletionPersistentTaskAction; +import org.elasticsearch.persistent.RemovePersistentTaskAction; +import org.elasticsearch.persistent.StartPersistentTaskAction; +import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ActionPlugin.ActionHandler; import org.elasticsearch.rest.RestController; @@ -241,7 +245,6 @@ import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction; -import 
org.elasticsearch.rest.action.admin.indices.RestResizeHandler; import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction; import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction; import org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction; @@ -252,7 +255,6 @@ import org.elasticsearch.rest.action.admin.indices.RestFlushAction; import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction; import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction; -import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction; @@ -269,6 +271,7 @@ import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction; import org.elasticsearch.rest.action.admin.indices.RestRecoveryAction; import org.elasticsearch.rest.action.admin.indices.RestRefreshAction; +import org.elasticsearch.rest.action.admin.indices.RestResizeHandler; import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction; import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction; import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction; @@ -313,10 +316,6 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.rest.action.search.RestSearchScrollAction; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.usage.UsageService; -import org.elasticsearch.persistent.CompletionPersistentTaskAction; -import org.elasticsearch.persistent.RemovePersistentTaskAction; -import org.elasticsearch.persistent.StartPersistentTaskAction; -import 
org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import java.util.ArrayList; import java.util.Collections; @@ -556,7 +555,6 @@ public class ActionModule extends AbstractModule { registerHandler.accept(new RestSnapshotsStatusAction(settings, restController)); registerHandler.accept(new RestGetAllAliasesAction(settings, restController)); - registerHandler.accept(new RestGetAllMappingsAction(settings, restController)); registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java deleted file mode 100644 index 9892717cd77..00000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.admin.indices; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; - -import java.io.IOException; -import java.util.List; -import java.util.Set; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; - -/** - * The REST handler for retrieving all mappings - */ -public class RestGetAllMappingsAction extends BaseRestHandler { - - public RestGetAllMappingsAction(final Settings settings, final RestController controller) { - super(settings); - controller.registerHandler(GET, "/_mapping", this); - controller.registerHandler(GET, "/_mappings", this); - } - - @Override - public String getName() { - return "get_all_mappings_action"; - } - - @Override - public RestChannelConsumer 
prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final GetIndexRequest getIndexRequest = new GetIndexRequest(); - getIndexRequest.indices(Strings.EMPTY_ARRAY); - getIndexRequest.features(Feature.MAPPINGS); - getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); - getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); - return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception { - builder.startObject(); - { - for (final String index : response.indices()) { - builder.startObject(index); - { - writeMappings(response.mappings().get(index), builder); - } - builder.endObject(); - } - } - builder.endObject(); - - return new BytesRestResponse(OK, builder); - } - - private void writeMappings(final ImmutableOpenMap mappings, - final XContentBuilder builder) throws IOException { - builder.startObject("mappings"); - { - for (final ObjectObjectCursor typeEntry : mappings) { - builder.field(typeEntry.key); - builder.map(typeEntry.value.sourceAsMap()); - } - } - builder.endObject(); - } - }); - } - -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 46388e6947f..08f8449b701 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -20,8 +20,6 @@ package org.elasticsearch.rest.action.admin.indices; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import 
org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -56,12 +54,13 @@ import java.util.stream.Collectors; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; public class RestGetMappingAction extends BaseRestHandler { public RestGetMappingAction(final Settings settings, final RestController controller) { super(settings); + controller.registerHandler(GET, "/_mapping", this); + controller.registerHandler(GET, "/_mappings", this); controller.registerHandler(GET, "/{index}/{type}/_mapping", this); controller.registerHandler(GET, "/{index}/_mappings", this); controller.registerHandler(GET, "/{index}/_mapping", this); From 8aa58887e2a15ebe49358b0a33f1f4ec81dd5836 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Wed, 6 Jun 2018 08:18:56 -0600 Subject: [PATCH 16/22] Security: make native realm usage stats accurate (#30824) The native realm's usage stats were previously pulled from the cache, which only contains the number of users that had authenticated in the past 20 minutes. This commit changes this so that we pull the current value from the security index by executing a search request. In order to support this, the usage stats for realms is now asynchronous so that we do not block while waiting on the search to complete. 
--- .../security/SecurityFeatureSetUsage.java | 4 + .../xpack/core/security/authc/Realm.java | 4 +- .../xpack/security/SecurityFeatureSet.java | 20 ++-- .../xpack/security/authc/Realms.java | 99 ++++++++++++------- .../security/authc/esnative/NativeRealm.java | 12 +++ .../authc/esnative/NativeUsersStore.java | 24 +++++ .../xpack/security/authc/file/FileRealm.java | 10 +- .../xpack/security/authc/ldap/LdapRealm.java | 14 +-- .../support/CachingUsernamePasswordRealm.java | 14 ++- .../security/authz/store/FileRolesStore.java | 2 +- .../authz/store/NativeRolesStore.java | 96 +++++++++--------- .../security/SecurityFeatureSetTests.java | 6 +- .../xpack/security/authc/RealmsTests.java | 15 ++- .../authc/esnative/NativeRealmIntegTests.java | 27 +++++ .../security/authc/file/FileRealmTests.java | 4 +- .../authc/ldap/ActiveDirectoryRealmTests.java | 4 +- .../security/authc/ldap/LdapRealmTests.java | 4 +- 17 files changed, 240 insertions(+), 119 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index b549cffc0cc..f615fbd0b53 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -92,4 +92,8 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { builder.field(ANONYMOUS_XFIELD, anonymousUsage); } } + + public Map getRealmsUsage() { + return Collections.unmodifiableMap(realmsUsage); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java index 234141c77c9..3e92be2ef90 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java @@ -119,11 +119,11 @@ public abstract class Realm implements Comparable { */ public abstract void lookupUser(String username, ActionListener listener); - public Map usageStats() { + public void usageStats(ActionListener> listener) { Map stats = new HashMap<>(); stats.put("name", name()); stats.put("order", order()); - return stats; + listener.onResponse(stats); } @Override diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java index 1be3b4cd679..ab70b8513de 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java @@ -86,7 +86,6 @@ public class SecurityFeatureSet implements XPackFeatureSet { @Override public void usage(ActionListener listener) { - Map realmsUsage = buildRealmsUsage(realms); Map sslUsage = sslUsage(settings); Map auditUsage = auditUsage(settings); Map ipFilterUsage = ipFilterUsage(ipFilter); @@ -94,10 +93,11 @@ public class SecurityFeatureSet implements XPackFeatureSet { final AtomicReference> rolesUsageRef = new AtomicReference<>(); final AtomicReference> roleMappingUsageRef = new AtomicReference<>(); - final CountDown countDown = new CountDown(2); + final AtomicReference> realmsUsageRef = new AtomicReference<>(); + final CountDown countDown = new CountDown(3); final Runnable doCountDown = () -> { if (countDown.countDown()) { - listener.onResponse(new SecurityFeatureSetUsage(available(), enabled(), realmsUsage, + listener.onResponse(new SecurityFeatureSetUsage(available(), enabled(), realmsUsageRef.get(), rolesUsageRef.get(), roleMappingUsageRef.get(), sslUsage, auditUsage, ipFilterUsage, anonymousUsage)); } @@ -116,6 +116,12 @@ public class SecurityFeatureSet 
implements XPackFeatureSet { doCountDown.run(); }, listener::onFailure); + final ActionListener> realmsUsageListener = + ActionListener.wrap(realmsUsage -> { + realmsUsageRef.set(realmsUsage); + doCountDown.run(); + }, listener::onFailure); + if (rolesStore == null) { rolesStoreUsageListener.onResponse(Collections.emptyMap()); } else { @@ -126,13 +132,11 @@ public class SecurityFeatureSet implements XPackFeatureSet { } else { roleMappingStore.usageStats(roleMappingStoreUsageListener); } - } - - static Map buildRealmsUsage(Realms realms) { if (realms == null) { - return Collections.emptyMap(); + realmsUsageListener.onResponse(Collections.emptyMap()); + } else { + realms.usageStats(realmsUsageListener); } - return realms.usageStats(); } static Map sslUsage(Settings settings) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 38319597523..0284ae9a05f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -15,12 +15,16 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.license.XPackLicenseState; @@ -188,46 +192,67 @@ public class Realms extends AbstractComponent implements Iterable { 
return realms; } - public Map usageStats() { + public void usageStats(ActionListener> listener) { Map realmMap = new HashMap<>(); - for (Realm realm : this) { - if (ReservedRealm.TYPE.equals(realm.type())) { - continue; + final AtomicBoolean failed = new AtomicBoolean(false); + final List realmList = asList().stream() + .filter(r -> ReservedRealm.TYPE.equals(r.type()) == false) + .collect(Collectors.toList()); + final CountDown countDown = new CountDown(realmList.size()); + final Runnable doCountDown = () -> { + if ((realmList.isEmpty() || countDown.countDown()) && failed.get() == false) { + final AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); + // iterate over the factories so we can add enabled & available info + for (String type : factories.keySet()) { + assert ReservedRealm.TYPE.equals(type) == false; + realmMap.compute(type, (key, value) -> { + if (value == null) { + return MapBuilder.newMapBuilder() + .put("enabled", false) + .put("available", isRealmTypeAvailable(allowedRealmType, type)) + .map(); + } + + assert value instanceof Map; + Map realmTypeUsage = (Map) value; + realmTypeUsage.put("enabled", true); + // the realms iterator returned this type so it must be enabled + assert isRealmTypeAvailable(allowedRealmType, type); + realmTypeUsage.put("available", true); + return value; + }); + } + listener.onResponse(realmMap); + } + }; + + if (realmList.isEmpty()) { + doCountDown.run(); + } else { + for (Realm realm : realmList) { + realm.usageStats(ActionListener.wrap(stats -> { + if (failed.get() == false) { + synchronized (realmMap) { + realmMap.compute(realm.type(), (key, value) -> { + if (value == null) { + Object realmTypeUsage = convertToMapOfLists(stats); + return realmTypeUsage; + } + assert value instanceof Map; + combineMaps((Map) value, stats); + return value; + }); + } + doCountDown.run(); + } + }, + e -> { + if (failed.compareAndSet(false, true)) { + listener.onFailure(e); + } + })); } - realmMap.compute(realm.type(), (key, 
value) -> { - if (value == null) { - Object realmTypeUsage = convertToMapOfLists(realm.usageStats()); - return realmTypeUsage; - } - assert value instanceof Map; - combineMaps((Map) value, realm.usageStats()); - return value; - }); } - - final AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); - // iterate over the factories so we can add enabled & available info - for (String type : factories.keySet()) { - assert ReservedRealm.TYPE.equals(type) == false; - realmMap.compute(type, (key, value) -> { - if (value == null) { - return MapBuilder.newMapBuilder() - .put("enabled", false) - .put("available", isRealmTypeAvailable(allowedRealmType, type)) - .map(); - } - - assert value instanceof Map; - Map realmTypeUsage = (Map) value; - realmTypeUsage.put("enabled", true); - // the realms iterator returned this type so it must be enabled - assert isRealmTypeAvailable(allowedRealmType, type); - realmTypeUsage.put("available", true); - return value; - }); - } - - return realmMap; } private void addNativeRealms(List realms) throws Exception { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java index af2bfcf0d6c..a84b76beab8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java @@ -15,6 +15,8 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import java.util.Map; + import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isIndexDeleted; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isMoveFromRedToNonRed; @@ -46,6 +48,16 
@@ public class NativeRealm extends CachingUsernamePasswordRealm { } } + @Override + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(stats -> + userStore.getUserCount(ActionListener.wrap(size -> { + stats.put("size", size); + listener.onResponse(stats); + }, listener::onFailure)) + , listener::onFailure)); + } + // method is used for testing to verify cache expiration since expireAll is final void clearCache() { expireAll(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 1477c6dc880..72a65b8213f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -150,6 +150,30 @@ public class NativeUsersStore extends AbstractComponent { } } + void getUserCount(final ActionListener listener) { + if (securityIndex.indexExists() == false) { + listener.onResponse(0L); + } else { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareSearch(SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), USER_DOC_TYPE)) + .setSize(0) + .request(), + new ActionListener() { + @Override + public void onResponse(SearchResponse response) { + listener.onResponse(response.getHits().getTotalHits()); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::search)); + } + } + /** * Async method to retrieve a user and their password */ diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java index 88656b9e01e..e2586ea836d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java @@ -55,11 +55,11 @@ public class FileRealm extends CachingUsernamePasswordRealm { } @Override - public Map usageStats() { - Map stats = super.usageStats(); - // here we can determine the size based on the in mem user store - stats.put("size", userPasswdStore.usersCount()); - return stats; + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(stats -> { + stats.put("size", userPasswdStore.usersCount()); + listener.onResponse(stats); + }, listener::onFailure)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java index a7c6efdda31..87749850141 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -160,12 +160,14 @@ public final class LdapRealm extends CachingUsernamePasswordRealm { } @Override - public Map usageStats() { - Map usage = super.usageStats(); - usage.put("load_balance_type", LdapLoadBalancing.resolve(config.settings()).toString()); - usage.put("ssl", sessionFactory.isSslUsed()); - usage.put("user_search", LdapUserSearchSessionFactory.hasUserSearchSettings(config)); - return usage; + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(usage -> { + usage.put("size", getCacheSize()); + usage.put("load_balance_type", LdapLoadBalancing.resolve(config.settings()).toString()); + usage.put("ssl", sessionFactory.isSslUsed()); + 
usage.put("user_search", LdapUserSearchSessionFactory.hasUserSearchSettings(config)); + listener.onResponse(usage); + }, listener::onFailure)); } private static void buildUser(LdapSession session, String username, ActionListener listener, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java index 8dae5275eda..e9c107abcce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.user.User; +import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.concurrent.ExecutionException; @@ -177,10 +178,15 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm } @Override - public Map usageStats() { - Map stats = super.usageStats(); - stats.put("size", cache.count()); - return stats; + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(stats -> { + stats.put("cache", Collections.singletonMap("size", getCacheSize())); + listener.onResponse(stats); + }, listener::onFailure)); + } + + protected int getCacheSize() { + return cache.count(); } protected abstract void doAuthenticate(UsernamePasswordToken token, ActionListener listener); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java index f2d78806da0..59bc8042fba 
100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java @@ -87,7 +87,7 @@ public class FileRolesStore extends AbstractComponent { } public Map usageStats() { - Map usageStats = new HashMap<>(); + Map usageStats = new HashMap<>(3); usageStats.put("size", permissions.size()); boolean dls = false; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index b1e5170a202..9093b6a6673 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -195,7 +195,7 @@ public class NativeRolesStore extends AbstractComponent { } public void usageStats(ActionListener> listener) { - Map usageStats = new HashMap<>(); + Map usageStats = new HashMap<>(3); if (securityIndex.indexExists() == false) { usageStats.put("size", 0L); usageStats.put("fls", false); @@ -204,56 +204,56 @@ public class NativeRolesStore extends AbstractComponent { } else { securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareMultiSearch() - .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) - .setSize(0)) - .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setQuery(QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) - .must(QueryBuilders.boolQuery() - .should(existsQuery("indices.field_security.grant")) - 
.should(existsQuery("indices.field_security.except")) - // for backwardscompat with 2.x - .should(existsQuery("indices.fields")))) - .setSize(0) - .setTerminateAfter(1)) - .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setQuery(QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) - .filter(existsQuery("indices.query"))) - .setSize(0) - .setTerminateAfter(1)) - .request(), - new ActionListener() { - @Override - public void onResponse(MultiSearchResponse items) { - Item[] responses = items.getResponses(); - if (responses[0].isFailure()) { - usageStats.put("size", 0); - } else { - usageStats.put("size", responses[0].getResponse().getHits().getTotalHits()); - } - - if (responses[1].isFailure()) { - usageStats.put("fls", false); - } else { - usageStats.put("fls", responses[1].getResponse().getHits().getTotalHits() > 0L); - } - - if (responses[2].isFailure()) { - usageStats.put("dls", false); - } else { - usageStats.put("dls", responses[2].getResponse().getHits().getTotalHits() > 0L); - } - listener.onResponse(usageStats); + client.prepareMultiSearch() + .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .setSize(0)) + .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .must(QueryBuilders.boolQuery() + .should(existsQuery("indices.field_security.grant")) + .should(existsQuery("indices.field_security.except")) + // for backwardscompat with 2.x + .should(existsQuery("indices.fields")))) + .setSize(0) + .setTerminateAfter(1)) + .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + 
.filter(existsQuery("indices.query"))) + .setSize(0) + .setTerminateAfter(1)) + .request(), + new ActionListener() { + @Override + public void onResponse(MultiSearchResponse items) { + Item[] responses = items.getResponses(); + if (responses[0].isFailure()) { + usageStats.put("size", 0); + } else { + usageStats.put("size", responses[0].getResponse().getHits().getTotalHits()); } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); + if (responses[1].isFailure()) { + usageStats.put("fls", false); + } else { + usageStats.put("fls", responses[1].getResponse().getHits().getTotalHits() > 0L); } - }, client::multiSearch)); + + if (responses[2].isFailure()) { + usageStats.put("dls", false); + } else { + usageStats.put("dls", responses[2].getResponse().getHits().getTotalHits() > 0L); + } + listener.onResponse(usageStats); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::multiSearch)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java index c169d62c6b1..076ce6c9fcb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java @@ -146,7 +146,11 @@ public class SecurityFeatureSetTests extends ESTestCase { realmUsage.put("key2", Arrays.asList(i)); realmUsage.put("key3", Arrays.asList(i % 2 == 0)); } - when(realms.usageStats()).thenReturn(realmsUsageStats); + doAnswer(invocationOnMock -> { + ActionListener> listener = (ActionListener) invocationOnMock.getArguments()[0]; + listener.onResponse(realmsUsageStats); + return Void.TYPE; + }).when(realms).usageStats(any(ActionListener.class)); final boolean anonymousEnabled = randomBoolean(); if (anonymousEnabled) { diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index 2bc3d58471b..ff4c30ddf8c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.security.authc; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -454,9 +455,11 @@ public class RealmsTests extends ESTestCase { .put("xpack.security.authc.realms.bar.order", "1"); Settings settings = builder.build(); Environment env = TestEnvironment.newEnvironment(settings); - Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm ); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); - Map usageStats = realms.usageStats(); + PlainActionFuture> future = new PlainActionFuture<>(); + realms.usageStats(future); + Map usageStats = future.get(); assertThat(usageStats.size(), is(factories.size())); // first check type_0 @@ -482,7 +485,9 @@ public class RealmsTests extends ESTestCase { // disable ALL using license when(licenseState.isAuthAllowed()).thenReturn(false); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NONE); - usageStats = realms.usageStats(); + future = new PlainActionFuture<>(); + realms.usageStats(future); + usageStats = future.get(); assertThat(usageStats.size(), is(factories.size())); for (Entry entry : usageStats.entrySet()) { Map typeMap = (Map) entry.getValue(); @@ -494,7 +499,9 @@ public class RealmsTests extends ESTestCase { // check native or internal realms 
enabled only when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.allowedRealmType()).thenReturn(randomFrom(AllowedRealmType.NATIVE, AllowedRealmType.DEFAULT)); - usageStats = realms.usageStats(); + future = new PlainActionFuture<>(); + realms.usageStats(future); + usageStats = future.get(); assertThat(usageStats.size(), is(factories.size())); for (Entry entry : usageStats.entrySet()) { final String type = entry.getKey(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index a238576e413..a0550b4c1ce 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -22,6 +22,10 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; +import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; @@ -49,6 +53,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -662,6 +667,28 @@ public class 
NativeRealmIntegTests extends NativeRealmIntegTestCase { assertThat(usage.get("dls"), is(dls)); } + public void testRealmUsageStats() { + final int numNativeUsers = scaledRandomIntBetween(1, 32); + SecurityClient securityClient = new SecurityClient(client()); + for (int i = 0; i < numNativeUsers; i++) { + securityClient.preparePutUser("joe" + i, "s3krit".toCharArray(), "superuser").get(); + } + + XPackUsageResponse response = new XPackUsageRequestBuilder(client()).get(); + Optional securityUsage = response.getUsages().stream() + .filter(usage -> usage instanceof SecurityFeatureSetUsage) + .findFirst(); + assertTrue(securityUsage.isPresent()); + SecurityFeatureSetUsage securityFeatureSetUsage = (SecurityFeatureSetUsage) securityUsage.get(); + Map realmsUsage = securityFeatureSetUsage.getRealmsUsage(); + assertNotNull(realmsUsage); + assertNotNull(realmsUsage.get("native")); + assertNotNull(((Map) realmsUsage.get("native")).get("size")); + List sizeList = (List) ((Map) realmsUsage.get("native")).get("size"); + assertEquals(1, sizeList.size()); + assertEquals(numNativeUsers, Math.toIntExact(sizeList.get(0))); + } + public void testSetEnabled() throws Exception { securityClient().preparePutUser("joe", "s3krit".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index b0f53229377..7295e48d003 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -248,7 +248,9 @@ public class FileRealmTests extends ESTestCase { threadContext); FileRealm realm = new FileRealm(config, userPasswdStore, 
userRolesStore, threadPool); - Map usage = realm.usageStats(); + PlainActionFuture> future = new PlainActionFuture<>(); + realm.usageStats(future); + Map usage = future.get(); assertThat(usage, is(notNullValue())); assertThat(usage, hasEntry("name", "file-realm")); assertThat(usage, hasEntry("order", order)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java index 52026cc8af5..6ab4dbf3e0c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java @@ -320,7 +320,9 @@ public class ActiveDirectoryRealmTests extends ESTestCase { DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); - Map stats = realm.usageStats(); + PlainActionFuture> future = new PlainActionFuture<>(); + realm.usageStats(future); + Map stats = future.get(); assertThat(stats, is(notNullValue())); assertThat(stats, hasEntry("name", realm.name())); assertThat(stats, hasEntry("order", realm.order())); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java index 042664fa670..ea1b9117922 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java @@ -360,7 +360,9 @@ public class LdapRealmTests extends LdapTestCase { LdapRealm realm = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, 
new DnRoleMapper(config, resourceWatcherService), threadPool); - Map stats = realm.usageStats(); + PlainActionFuture> future = new PlainActionFuture<>(); + realm.usageStats(future); + Map stats = future.get(); assertThat(stats, is(notNullValue())); assertThat(stats, hasEntry("name", "ldap-realm")); assertThat(stats, hasEntry("order", realm.order())); From 515a23360d4791499e7675c38bbad9e6e244fc7b Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 6 Jun 2018 16:38:06 +0200 Subject: [PATCH 17/22] Do not check for S3 blob to exist before writing (#31128) In #19749 an extra check was added before writing each blob to ensure that we would not be overriding an existing blob. Due to S3's weak consistency model, this check was best effort. To make matters worse, however, this resulted in a HEAD request to be done before every PUT, in particular also when PUTTING a new object. The approach taken in #19749 worsened our consistency guarantees for follow-up snapshot actions, as it made it less likely for new files that had been written to be available for reads. This commit therefore removes this extra check. Due to the weak consistency model, this check was a best effort thing anyway, and there's currently no way to prevent accidental overrides on S3. 
--- .../org/elasticsearch/repositories/s3/S3BlobContainer.java | 4 ---- .../repositories/s3/S3BlobStoreContainerTests.java | 5 +++++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 401ef0933a8..92050e34a5a 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -96,10 +96,6 @@ class S3BlobContainer extends AbstractBlobContainer { @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - if (blobExists(blobName)) { - throw new FileAlreadyExistsException("Blob [" + blobName + "] already exists, cannot overwrite"); - } - SocketAccess.doPrivilegedIOException(() -> { if (blobSize <= blobStore.bufferSizeInBytes()) { executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java index 453ef3213f0..c760e86d135 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java @@ -64,6 +64,11 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase { return randomMockS3BlobStore(); } + @Override + public void testVerifyOverwriteFails() { + assumeFalse("not implemented because of S3's weak consistency model", true); + } + public void testExecuteSingleUploadBlobSizeTooLarge() { final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(6, 10)); final S3BlobStore blobStore = 
mock(S3BlobStore.class); From 1dca00deb93bdc104f6ebb8814bf43a42b9273d9 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 6 Jun 2018 16:38:37 +0200 Subject: [PATCH 18/22] Remove extra checks from HdfsBlobContainer (#31126) This commit saves one network roundtrip when reading or deleting files from an HDFS repository. --- .../repositories/hdfs/HdfsBlobContainer.java | 24 +++++++++++-------- .../ESBlobStoreContainerTestCase.java | 7 ++++++ 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index 926cf0b2ad4..1052dc9ded9 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.repositories.hdfs.HdfsBlobStore.Operation; +import java.io.FileNotFoundException; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; @@ -69,11 +70,13 @@ final class HdfsBlobContainer extends AbstractBlobContainer { @Override public void deleteBlob(String blobName) throws IOException { - if (!blobExists(blobName)) { - throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); + try { + if (store.execute(fileContext -> fileContext.delete(new Path(path, blobName), true)) == false) { + throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); + } + } catch (FileNotFoundException fnfe) { + throw new NoSuchFileException("[" + blobName + "] blob not found"); } - - store.execute(fileContext -> fileContext.delete(new Path(path, blobName), true)); } @Override @@ -86,16 +89,17 @@ final class 
HdfsBlobContainer extends AbstractBlobContainer { @Override public InputStream readBlob(String blobName) throws IOException { - if (!blobExists(blobName)) { - throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); - } // FSDataInputStream does buffering internally // FSDataInputStream can open connections on read() or skip() so we wrap in // HDFSPrivilegedInputSteam which will ensure that underlying methods will // be called with the proper privileges. - return store.execute(fileContext -> - new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext) - ); + try { + return store.execute(fileContext -> + new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext) + ); + } catch (FileNotFoundException fnfe) { + throw new NoSuchFileException("[" + blobName + "] blob not found"); + } } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java index df2024de445..be06e09c6ff 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java @@ -47,6 +47,13 @@ import static org.hamcrest.CoreMatchers.notNullValue; */ public abstract class ESBlobStoreContainerTestCase extends ESTestCase { + public void testReadNonExistingPath() throws IOException { + try(BlobStore store = newBlobStore()) { + final BlobContainer container = store.blobContainer(new BlobPath()); + expectThrows(NoSuchFileException.class, () -> container.readBlob("non-existing")); + } + } + public void testWriteRead() throws IOException { try(BlobStore store = newBlobStore()) { final BlobContainer container = store.blobContainer(new BlobPath()); From e9fe371e41bcb6f8bc7b90b9cbbd2c74e0145a79 Mon Sep 17 00:00:00 2001 From: 
Adrien Grand Date: Wed, 6 Jun 2018 16:46:11 +0200 Subject: [PATCH 19/22] Give the engine the whole index buffer size on init. (#31105) Currently the engine is initialized with a hardcoded 256MB of RAM. Elasticsearch may never use more than that for a given shard, `IndexingMemoryController` only has the power to flush segments to disk earlier in case multiple shards are actively indexing and use too much memory. While this amount of memory is enough for an index with few fields and larger RAM buffers are not expected to improve indexing speed, this might actually be little for an index that has many fields. Kudos to @bleskes for finding it out when looking into a user who was reporting a **much** slower indexing speed when upgrading from 2.x to 5.6 with an index that has about 20,000 fields. --- .../index/engine/EngineConfig.java | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index b7c5a416913..2deae61bd52 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -29,8 +29,8 @@ import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; @@ -139,10 +139,20 @@ public final class EngineConfig { this.codecService = codecService; this.eventListener = eventListener; codecName = indexSettings.getValue(INDEX_CODEC_SETTING); - // We give IndexWriter a 
"huge" (256 MB) buffer, so it won't flush on its own unless the ES indexing buffer is also huge and/or - // there are not too many shards allocated to this node. Instead, IndexingMemoryController periodically checks - // and refreshes the most heap-consuming shards when total indexing heap usage across all shards is too high: - indexingBufferSize = new ByteSizeValue(256, ByteSizeUnit.MB); + // We need to make the indexing buffer for this shard at least as large + // as the amount of memory that is available for all engines on the + // local node so that decisions to flush segments to disk are made by + // IndexingMemoryController rather than Lucene. + // Add an escape hatch in case this change proves problematic - it used + // to be a fixed amound of RAM: 256 MB. + // TODO: Remove this escape hatch in 8.x + final String escapeHatchProperty = "es.index.memory.max_index_buffer_size"; + String maxBufferSize = System.getProperty(escapeHatchProperty); + if (maxBufferSize != null) { + indexingBufferSize = MemorySizeValue.parseBytesSizeValueOrHeapRatio(maxBufferSize, escapeHatchProperty); + } else { + indexingBufferSize = IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING.get(indexSettings.getNodeSettings()); + } this.queryCache = queryCache; this.queryCachingPolicy = queryCachingPolicy; this.translogConfig = translogConfig; From 6fd4eb52b8261581cf3b7e5c3178fe7940d1b00f Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 6 Jun 2018 07:49:15 -0700 Subject: [PATCH 20/22] [DOCS] Moves commands to docs folder (#31114) --- {x-pack/docs/en => docs/reference}/commands/certgen.asciidoc | 1 + {x-pack/docs/en => docs/reference}/commands/certutil.asciidoc | 1 + {x-pack/docs/en => docs/reference}/commands/index.asciidoc | 0 .../docs/en => docs/reference}/commands/migrate-tool.asciidoc | 1 + .../docs/en => docs/reference}/commands/saml-metadata.asciidoc | 1 + .../en => docs/reference}/commands/setup-passwords.asciidoc | 1 + {x-pack/docs/en => 
docs/reference}/commands/syskeygen.asciidoc | 1 + .../docs/en => docs/reference}/commands/users-command.asciidoc | 1 + docs/reference/index.asciidoc | 2 +- 9 files changed, 8 insertions(+), 1 deletion(-) rename {x-pack/docs/en => docs/reference}/commands/certgen.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/commands/certutil.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/commands/index.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/commands/migrate-tool.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/commands/saml-metadata.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/commands/setup-passwords.asciidoc (99%) rename {x-pack/docs/en => docs/reference}/commands/syskeygen.asciidoc (98%) rename {x-pack/docs/en => docs/reference}/commands/users-command.asciidoc (99%) diff --git a/x-pack/docs/en/commands/certgen.asciidoc b/docs/reference/commands/certgen.asciidoc similarity index 99% rename from x-pack/docs/en/commands/certgen.asciidoc rename to docs/reference/commands/certgen.asciidoc index c2a00f11b69..3a8b15fbd28 100644 --- a/x-pack/docs/en/commands/certgen.asciidoc +++ b/docs/reference/commands/certgen.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[certgen]] == certgen diff --git a/x-pack/docs/en/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc similarity index 99% rename from x-pack/docs/en/commands/certutil.asciidoc rename to docs/reference/commands/certutil.asciidoc index ad265c89f10..e0c6c701e31 100644 --- a/x-pack/docs/en/commands/certutil.asciidoc +++ b/docs/reference/commands/certutil.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[certutil]] == elasticsearch-certutil diff --git a/x-pack/docs/en/commands/index.asciidoc b/docs/reference/commands/index.asciidoc similarity index 100% rename from x-pack/docs/en/commands/index.asciidoc rename to docs/reference/commands/index.asciidoc diff --git a/x-pack/docs/en/commands/migrate-tool.asciidoc 
b/docs/reference/commands/migrate-tool.asciidoc similarity index 99% rename from x-pack/docs/en/commands/migrate-tool.asciidoc rename to docs/reference/commands/migrate-tool.asciidoc index 1d19452df80..a1903ac69da 100644 --- a/x-pack/docs/en/commands/migrate-tool.asciidoc +++ b/docs/reference/commands/migrate-tool.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[migrate-tool]] == elasticsearch-migrate diff --git a/x-pack/docs/en/commands/saml-metadata.asciidoc b/docs/reference/commands/saml-metadata.asciidoc similarity index 99% rename from x-pack/docs/en/commands/saml-metadata.asciidoc rename to docs/reference/commands/saml-metadata.asciidoc index 1cd283fd776..069c7135c01 100644 --- a/x-pack/docs/en/commands/saml-metadata.asciidoc +++ b/docs/reference/commands/saml-metadata.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[saml-metadata]] == saml-metadata diff --git a/x-pack/docs/en/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc similarity index 99% rename from x-pack/docs/en/commands/setup-passwords.asciidoc rename to docs/reference/commands/setup-passwords.asciidoc index b323dc8e5c1..a7dcd25d65e 100644 --- a/x-pack/docs/en/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[setup-passwords]] == elasticsearch-setup-passwords diff --git a/x-pack/docs/en/commands/syskeygen.asciidoc b/docs/reference/commands/syskeygen.asciidoc similarity index 98% rename from x-pack/docs/en/commands/syskeygen.asciidoc rename to docs/reference/commands/syskeygen.asciidoc index f4a198ff4bf..3ae7456448d 100644 --- a/x-pack/docs/en/commands/syskeygen.asciidoc +++ b/docs/reference/commands/syskeygen.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[syskeygen]] == elasticsearch-syskeygen diff --git a/x-pack/docs/en/commands/users-command.asciidoc b/docs/reference/commands/users-command.asciidoc similarity index 99% rename from 
x-pack/docs/en/commands/users-command.asciidoc rename to docs/reference/commands/users-command.asciidoc index ab1b89b149b..e53e0815c5d 100644 --- a/x-pack/docs/en/commands/users-command.asciidoc +++ b/docs/reference/commands/users-command.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[users-command]] == Users Command ++++ diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index b38a554d681..11006d38976 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -65,7 +65,7 @@ include::{xes-repo-dir}/rollup/index.asciidoc[] include::rest-api/index.asciidoc[] -include::{xes-repo-dir}/commands/index.asciidoc[] +include::commands/index.asciidoc[] :edit_url: include::how-to.asciidoc[] From 7c59e7690e04b87c148d9b9cb1c0000f533a4da7 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Wed, 6 Jun 2018 11:59:16 -0400 Subject: [PATCH 21/22] QA: Switch xpack rolling upgrades to three nodes (#31112) This is much more realistic and can find more issues. This causes the "mixed cluster" tests to be run twice so I had to fix the tests to work in that case. In most cases I did as little as possible to get them working but in a few cases I went a little beyond that to make them easier for me to debug while getting them to work. My test changes: 1. Remove the "basic indexing" tests and replace them with a copy of the tests used in the OSS. We have no way of sharing code between these two projects so for now I copy. 2. Skip a few tests in the "one third" upgraded scenario: * creating a scroll to be reused when the cluster is fully upgraded * creating some ml data to be used when the cluster is fully upgraded 3. Drop many "assert yellow and that the cluster has two nodes" assertions. These assertions duplicate those made by the wait condition and they fail now that we have three nodes. 4. Switch many "assert green and that the cluster has two nodes" to 3 nodes.
These assertions are unique from the wait condition and, while I imagine they aren't required in all cases, now is not the time to find that out. Thus, I made them work. 5. Rework the index audit trail test so it is more obvious that it is the same test expecting different numbers based on the shape of the cluster. The conditions for which number are expected are fairly complex because the index audit trail is shut down until the template for it is upgraded and the template is upgraded when a master node is elected that has the new version of the software. 6. Add some more information to debug the index audit trail test because it helped me figure out what was going on. I also dropped the `waitCondition` from the `rolling-upgrade-basic` tests because it wasn't needed. Closes #25336 --- .../elasticsearch/upgrades/IndexingIT.java | 4 + x-pack/qa/rolling-upgrade-basic/build.gradle | 146 ++++++---------- x-pack/qa/rolling-upgrade/build.gradle | 136 +++++++-------- .../upgrades/AbstractUpgradeTestCase.java | 6 +- .../upgrades/IndexAuditUpgradeIT.java | 83 +++++---- .../elasticsearch/upgrades/IndexingIT.java | 124 ++++++++++++++ .../TokenBackwardsCompatibilityIT.java | 8 +- .../test/mixed_cluster/10_basic.yml | 159 +----------------- .../test/mixed_cluster/20_security.yml | 11 -- .../test/mixed_cluster/30_ml_jobs_crud.yml | 7 - .../mixed_cluster/40_ml_datafeed_crud.yml | 6 - .../test/old_cluster/10_basic.yml | 31 ---- .../test/upgraded_cluster/10_basic.yml | 39 +---- .../test/upgraded_cluster/20_security.yml | 3 +- .../test/upgraded_cluster/30_ml_jobs_crud.yml | 2 +- .../upgraded_cluster/40_ml_datafeed_crud.yml | 3 +- 16 files changed, 315 insertions(+), 453 deletions(-) create mode 100644 x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java delete mode 100644 x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java 
b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index f1e01d24acf..3898746e5c3 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -30,6 +30,10 @@ import java.nio.charset.StandardCharsets; * Basic test that indexed documents survive the rolling restart. See * {@link RecoveryIT} for much more in depth testing of the mechanism * by which they survive. + *

+ * This test is an almost exact copy of IndexingIT in the + * xpack rolling restart tests. We should work on a way to remove this + * duplication but for now we have no real way to share code. */ public class IndexingIT extends AbstractRollingTestCase { public void testIndexing() throws IOException { diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 91a6d106c98..3592d34c9f4 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -8,62 +8,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'runtime') - testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit } -Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> - File tmpFile = new File(node.cwd, 'wait.success') - - // wait up to two minutes - final long stopTime = System.currentTimeMillis() + (2 * 60000L); - Exception lastException = null; - int lastResponseCode = 0 - - while (System.currentTimeMillis() < stopTime) { - - lastException = null; - // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned - HttpURLConnection httpURLConnection = null; - try { - // TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling - httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection(); - httpURLConnection.setRequestProperty("Authorization", "Basic " + - Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); - httpURLConnection.setRequestMethod("GET"); - httpURLConnection.setConnectTimeout(1000); - 
httpURLConnection.setReadTimeout(30000); // read needs to wait for nodes! - httpURLConnection.connect(); - lastResponseCode = httpURLConnection.getResponseCode() - if (lastResponseCode == 200) { - tmpFile.withWriter StandardCharsets.UTF_8.name(), { - it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) - } - break; - } - } catch (Exception e) { - logger.debug("failed to call cluster health", e) - lastException = e - } finally { - if (httpURLConnection != null) { - httpURLConnection.disconnect(); - } - } - - // did not start, so wait a bit before trying again - Thread.sleep(500L); - } - if (tmpFile.exists() == false) { - final String message = "final attempt of calling cluster health failed [lastResponseCode=${lastResponseCode}]" - if (lastException != null) { - logger.error(message, lastException) - } else { - logger.error(message + " [no exception]") - } - } - return tmpFile.exists() -} - // This is a top level task which we will add dependencies to below. // It is a single task that can be used to backcompat tests against all versions. 
task bwcTest { @@ -82,14 +29,13 @@ for (Version version : bwcVersions.wireCompatible) { configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { if (version.before('6.3.0')) { - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" } bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - minimumMasterNodes = { 2 } + numBwcNodes = 3 + numNodes = 3 + minimumMasterNodes = { 3 } clusterName = 'rolling-upgrade-basic' - waitCondition = waitWithAuth setting 'xpack.security.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' @@ -102,51 +48,62 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.suite', 'old_cluster' } - Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) - - configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { - dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" - clusterName = 'rolling-upgrade-basic' - unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } - waitCondition = waitWithAuth - setting 'xpack.security.enabled', 'false' - setting 'xpack.monitoring.enabled', 'false' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - setting 'xpack.license.self_generated.type', 'basic' - setting 'node.name', 'mixed-node-0' + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + clusterName = 'rolling-upgrade-basic' + unicastTransportUri = { seedNode, node, ant -> unicastSeed() } + minimumMasterNodes = { 3 } + /* Override the data directory so the new 
node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + } } - Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") - mixedClusterTestRunner.configure { + Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, + 0, { oldClusterTest.nodes.get(1).transportUri() }) + + Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") + oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' - finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + systemProperty 'tests.first_round', 'true' + finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" + } + + Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, + 1, { oneThirdUpgradedTest.nodes.get(0).transportUri() }) + + Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") + twoThirdsUpgradedTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'false' + finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) - configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") - 
clusterName = 'rolling-upgrade-basic' - unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir } - waitCondition = waitWithAuth - setting 'xpack.security.enabled', 'false' - setting 'xpack.monitoring.enabled', 'false' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - setting 'xpack.license.self_generated.type', 'basic' - setting 'node.name', 'upgraded-node-0' - } + configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, + 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' - // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion - finalizedBy "${baseName}#mixedClusterTestCluster#stop" + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. 
+ */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { @@ -170,11 +127,6 @@ task integTest { } check.dependsOn(integTest) -dependencies { - testCompile project(path: xpackModule('core'), configuration: 'runtime') - testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') -} - compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" // copy x-pack plugin info so it is on the classpath and security manager has the right permissions diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 6e93041e9a0..f11addb42b8 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -30,7 +30,7 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> HttpURLConnection httpURLConnection = null; try { // TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling - httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection(); + httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=3&wait_for_status=yellow").openConnection(); httpURLConnection.setRequestProperty("Authorization", "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); httpURLConnection.setRequestMethod("GET"); @@ -128,9 +128,9 @@ subprojects { String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - minimumMasterNodes = { 2 } + numBwcNodes = 3 + numNodes = 3 + minimumMasterNodes = { 3 } clusterName = 'rolling-upgrade' waitCondition = waitWithAuth setting 'xpack.monitoring.exporters._http.type', 'http' @@ -167,78 +167,84 @@ subprojects { systemProperty 'tests.rest.suite', 'old_cluster' } - Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) - - configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { - dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.ssl.keystore.path', 'testnode.jks' - keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' - setting 'node.attr.upgraded', 'first' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'index' - setting 'node.name', 'mixed-node-0' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', 
new File(outputDir + '/testnode.jks') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> unicastSeed() } + minimumMasterNodes = { 3 } + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.ssl.keystore.path', 'testnode.jks' + keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' + setting 'node.attr.upgraded', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'index' + setting 'node.name', "upgraded-node-${stopNode}" + dependsOn copyTestNodeKeystore + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (withSystemKey) { + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', 
"${mainProject.projectDir}/src/test/resources/system_key" + } } } - Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") - mixedClusterTestRunner.configure { + Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, + 0, { oldClusterTest.nodes.get(1).transportUri() }) + + Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") + oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' - finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + systemProperty 'tests.first_round', 'true' + // We only need to run these tests once so we may as well do it when we're two thirds upgraded + systemProperty 'tests.rest.blacklist', [ + 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', + 'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data', + 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster', + ].join(',') + finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" + } + + Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, + 1, { oneThirdUpgradedTest.nodes.get(0).transportUri() }) + + Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") + twoThirdsUpgradedTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'false' + finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) - configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - 
dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir } - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.ssl.keystore.path', 'testnode.jks' - keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'index' - setting 'node.name', 'upgraded-node-0' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } - } + configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, + 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. 
+ */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" // migration tests should only run when the original/old cluster nodes where versions < 5.2.0. // this stinks but we do the check here since our rest tests do not support conditionals @@ -251,8 +257,6 @@ subprojects { systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' } } - // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion - finalizedBy "${baseName}#mixedClusterTestCluster#stop" } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index 65b1a7c85dc..a3576b7b8c3 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -37,12 +37,12 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase { return true; } - enum CLUSTER_TYPE { + enum ClusterType { OLD, MIXED, UPGRADED; - public static CLUSTER_TYPE parse(String value) { + public static ClusterType parse(String value) { switch (value) { case "old_cluster": return OLD; @@ -56,7 +56,7 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase { } } - protected final CLUSTER_TYPE clusterType = CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); + protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); @Override protected Settings restClientSettings() { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java 
b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java index 1f76e670854..da6f9133d03 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java @@ -8,37 +8,48 @@ package org.elasticsearch.upgrades; import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.Version; import org.elasticsearch.client.Response; +import org.elasticsearch.common.Booleans; import org.hamcrest.Matchers; +import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.hasSize; public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase { - public void testDocsAuditedInOldCluster() throws Exception { - assumeTrue("only runs against old cluster", clusterType == CLUSTER_TYPE.OLD); + public void testAuditLogs() throws Exception { assertBusy(() -> { assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(2); - }); + assertNumUniqueNodeNameBuckets(expectedNumUniqueNodeNameBuckets()); + }, 1, TimeUnit.HOURS); } - public void testDocsAuditedInMixedCluster() throws Exception { - assumeTrue("only runs against mixed cluster", clusterType == CLUSTER_TYPE.MIXED); - assertBusy(() -> { - assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(2); - }); - } - - public void testDocsAuditedInUpgradedCluster() throws Exception { - assumeTrue("only runs against upgraded cluster", clusterType == CLUSTER_TYPE.UPGRADED); - assertBusy(() -> { - assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(4); - }); + private int expectedNumUniqueNodeNameBuckets() throws IOException { + switch (CLUSTER_TYPE) { + case OLD: + // There are three nodes in the initial test cluster + return 3; + case MIXED: + if (false == 
masterIsNewVersion()) { + return 3; + } + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + // One of the old nodes has been removed and we've added a new node + return 4; + } + // Two of the old nodes have been removed and we've added two new nodes + return 5; + case UPGRADED: + return 6; + default: + throw new IllegalArgumentException("Unsupported cluster type [" + CLUSTER_TYPE + "]"); + } } private void assertAuditDocsExist() throws Exception { @@ -51,26 +62,40 @@ public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase { private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception { // call API that will hit all nodes - assertEquals(200, client().performRequest("GET", "/_nodes").getStatusLine().getStatusCode()); + Map nodesResponse = entityAsMap(client().performRequest("GET", "/_nodes/_all/info/version")); + logger.info("all nodes {}", nodesResponse); HttpEntity httpEntity = new StringEntity( "{\n" + - " \"aggs\" : {\n" + - " \"nodes\" : {\n" + - " \"terms\" : { \"field\" : \"node_name\" }\n" + - " }\n" + - " }\n" + - "}", ContentType.APPLICATION_JSON); + " \"aggs\" : {\n" + + " \"nodes\" : {\n" + + " \"terms\" : { \"field\" : \"node_name\" }\n" + + " }\n" + + " }\n" + + "}", ContentType.APPLICATION_JSON); Response aggResponse = client().performRequest("GET", "/.security_audit_log*/_search", Collections.singletonMap("pretty", "true"), httpEntity); Map aggResponseMap = entityAsMap(aggResponse); logger.debug("aggResponse {}", aggResponseMap); - Map aggregations = (Map) aggResponseMap.get("aggregations"); + Map aggregations = (Map) aggResponseMap.get("aggregations"); assertNotNull(aggregations); - Map nodesAgg = (Map) aggregations.get("nodes"); + Map nodesAgg = (Map) aggregations.get("nodes"); assertNotNull(nodesAgg); - List> buckets = (List>) nodesAgg.get("buckets"); + List buckets = (List) nodesAgg.get("buckets"); assertNotNull(buckets); - assertEquals("Found node buckets " + buckets, numBuckets, buckets.size()); + 
assertThat("Found node buckets " + buckets, buckets, hasSize(numBuckets)); + } + + /** + * Has the master been upgraded to the new version? + * @throws IOException + */ + private boolean masterIsNewVersion() throws IOException { + Map map = entityAsMap(client().performRequest("GET", "/_nodes/_master")); + map = (Map) map.get("nodes"); + assertThat(map.values(), hasSize(1)); + map = (Map) map.values().iterator().next(); + Version masterVersion = Version.fromString(map.get("version").toString()); + return Version.CURRENT.equals(masterVersion); } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java new file mode 100644 index 00000000000..3448117cd2c --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +/** + * Basic test that indexed documents survive the rolling restart. + *

+ * This test is an almost exact copy of IndexingIT in the + * oss rolling restart tests. We should work on a way to remove this + * duplication but for now we have no real way to share code. + */ +public class IndexingIT extends AbstractUpgradeTestCase { + public void testIndexing() throws IOException { + switch (CLUSTER_TYPE) { + case OLD: + break; + case MIXED: + Request waitForYellow = new Request("GET", "/_cluster/health"); + waitForYellow.addParameter("wait_for_nodes", "3"); + waitForYellow.addParameter("wait_for_status", "yellow"); + client().performRequest(waitForYellow); + break; + case UPGRADED: + Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index"); + waitForGreen.addParameter("wait_for_nodes", "3"); + waitForGreen.addParameter("wait_for_status", "green"); + // wait for long enough that we give delayed unassigned shards to stop being delayed + waitForGreen.addParameter("timeout", "70s"); + waitForGreen.addParameter("level", "shards"); + client().performRequest(waitForGreen); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + if (CLUSTER_TYPE == ClusterType.OLD) { + Request createTestIndex = new Request("PUT", "/test_index"); + createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); + client().performRequest(createTestIndex); + + String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}"; + Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas"); + createIndexWithReplicas.setJsonEntity(recoverQuickly); + client().performRequest(createIndexWithReplicas); + + Request createEmptyIndex = new Request("PUT", "/empty_index"); + // Ask for recovery to be quick + createEmptyIndex.setJsonEntity(recoverQuickly); + client().performRequest(createEmptyIndex); + + bulk("test_index", "_OLD", 5); + bulk("index_with_replicas", "_OLD", 5); + } + + int expectedCount; 
+ switch (CLUSTER_TYPE) { + case OLD: + expectedCount = 5; + break; + case MIXED: + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + expectedCount = 5; + } else { + expectedCount = 10; + } + break; + case UPGRADED: + expectedCount = 15; + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + assertCount("test_index", expectedCount); + assertCount("index_with_replicas", 5); + assertCount("empty_index", 0); + + if (CLUSTER_TYPE != ClusterType.OLD) { + bulk("test_index", "_" + CLUSTER_TYPE, 5); + Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted"); + toBeDeleted.addParameter("refresh", "true"); + toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); + client().performRequest(toBeDeleted); + assertCount("test_index", expectedCount + 6); + + Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted"); + delete.addParameter("refresh", "true"); + client().performRequest(delete); + + assertCount("test_index", expectedCount + 5); + } + } + + private void bulk(String index, String valueSuffix, int count) throws IOException { + StringBuilder b = new StringBuilder(); + for (int i = 0; i < count; i++) { + b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n"); + b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.setJsonEntity(b.toString()); + client().performRequest(bulk); + } + + private void assertCount(String index, int count) throws IOException { + Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); + searchTestIndexRequest.addParameter("filter_path", "hits.total"); + Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); + assertEquals("{\"hits\":{\"total\":" + count + "}}", + 
EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8)); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 4fa0c9a535f..705122252e7 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -25,7 +25,7 @@ import java.util.Map; public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { public void testGeneratingTokenInOldCluster() throws Exception { - assumeTrue("this test should only run against the old cluster", clusterType == CLUSTER_TYPE.OLD); + assumeTrue("this test should only run against the old cluster", CLUSTER_TYPE == ClusterType.OLD); final StringEntity tokenPostBody = new StringEntity("{\n" + " \"username\": \"test_user\",\n" + " \"password\": \"x-pack-test-password\",\n" + @@ -61,7 +61,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { assumeTrue("this test should only run against the mixed or upgraded cluster", - clusterType == CLUSTER_TYPE.MIXED || clusterType == CLUSTER_TYPE.UPGRADED); + CLUSTER_TYPE == ClusterType.MIXED || CLUSTER_TYPE == ClusterType.UPGRADED); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); assertOK(getResponse); Map source = (Map) entityAsMap(getResponse).get("_source"); @@ -69,7 +69,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { } public void testMixedCluster() throws Exception { - assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.MIXED); + assumeTrue("this test should only run against the mixed 
cluster", CLUSTER_TYPE == ClusterType.MIXED); assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion()); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); assertOK(getResponse); @@ -117,7 +117,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { } public void testUpgradedCluster() throws Exception { - assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.UPGRADED); + assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.UPGRADED); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); assertOK(getResponse); Map source = (Map) entityAsMap(getResponse).get("_source"); diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index 93db3996a6b..3dd1f708959 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -1,166 +1,13 @@ --- -setup: - - do: - cluster.health: - # if the primary shard of an index with (number_of_replicas > 0) ends up on the new node, the replica cannot be - # allocated to the old node (see NodeVersionAllocationDecider). x-pack automatically creates indices with - # replicas, for example monitoring-data-*. 
- wait_for_status: yellow - wait_for_nodes: 2 - ---- -"Index data and search on the mixed cluster": - - do: - search: - index: test_index - - - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_mixed", "f2": 5}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_mixed", "f2": 6}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_mixed", "f2": 7}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_mixed", "f2": 8}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v5_mixed", "f2": 9}' - - - do: - index: - index: test_index - type: test_type - id: d10 - body: {"f1": "v6_mixed", "f2": 10} - - - do: - index: - index: test_index - type: test_type - id: d11 - body: {"f1": "v7_mixed", "f2": 11} - - - do: - index: - index: test_index - type: test_type - id: d12 - body: {"f1": "v8_mixed", "f2": 12} - - - do: - indices.refresh: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 13 } # 5 docs from old cluster, 8 docs from mixed cluster - - - do: - delete: - index: test_index - type: test_type - id: d10 - - - do: - delete: - index: test_index - type: test_type - id: d11 - - - do: - delete: - index: test_index - type: test_type - id: d12 - - - do: - indices.refresh: - index: test_index - ---- -"Basic scroll mixed": - - do: - indices.create: - index: test_scroll - - do: - index: - index: test_scroll - type: test - id: 42 - body: { foo: 1 } - - - do: - index: - index: test_scroll - type: test - id: 43 - body: { foo: 2 } - - - do: - indices.refresh: {} - - - do: - search: - index: test_scroll - size: 1 - scroll: 1m - sort: foo - body: - query: - match_all: {} - - - set: {_scroll_id: scroll_id} - - match: {hits.total: 2 } - - length: {hits.hits: 1 } - - match: 
{hits.hits.0._id: "42" } - - - do: - index: - index: test_scroll - type: test - id: 44 - body: { foo: 3 } - - - do: - indices.refresh: {} - - - do: - scroll: - body: { "scroll_id": "$scroll_id", "scroll": "1m"} - - - match: {hits.total: 2 } - - length: {hits.hits: 1 } - - match: {hits.hits.0._id: "43" } - - - do: - scroll: - scroll_id: $scroll_id - scroll: 1m - - - match: {hits.total: 2 } - - length: {hits.hits: 0 } - - - do: - clear_scroll: - scroll_id: $scroll_id - ---- -"Start scroll in mixed cluster for upgraded": +"Start scroll in mixed cluster on upgraded node that we will continue after upgrade": - do: indices.create: index: upgraded_scroll wait_for_active_shards: all body: settings: - number_of_replicas: "0" - index.routing.allocation.include.upgraded: "first" + number_of_replicas: 0 + index.routing.allocation.include.upgraded: true - do: index: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml index 750bedc4c6d..cfe3ca97330 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml @@ -1,13 +1,5 @@ --- "Verify user and role in mixed cluster": - - do: - headers: - Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - - match: { timed_out: false } - - do: xpack.security.get_user: username: "native_user" @@ -36,6 +28,3 @@ username: "kibana,logstash_system" - match: { kibana.enabled: false } - match: { logstash_system.enabled: true } - - - diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml index daf2f913fff..6ea8771c237 100644 
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -1,10 +1,3 @@ ---- -setup: - - do: - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - --- "Test get old cluster job": - skip: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 8a06c91cc8a..0ec288f9097 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,9 +1,3 @@ -setup: - - do: - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - --- "Test old cluster datafeed": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml deleted file mode 100644 index a780709400a..00000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -"Index data and search on the old cluster": - - do: - indices.create: - index: test_index - wait_for_active_shards : all - body: - settings: - index: - number_of_replicas: 1 - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_old", "f2": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_old", "f2": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_old", "f2": 2}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_old", "f2": 3}' - - '{"index": {"_index": "test_index", "_type": 
"test_type"}}' - - '{"f1": "v5_old", "f2": 4}' - - - do: - search: - index: test_index - - - match: { hits.total: 5 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 9c3443339a7..7249b4a32c7 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -1,42 +1,5 @@ --- -"Index data and search on the upgraded cluster": - - do: - cluster.health: - wait_for_status: green - wait_for_nodes: 2 - # wait for long enough that we give delayed unassigned shards to stop being delayed - timeout: 70s - level: shards - - - do: - search: - index: test_index - - - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_upgraded", "f2": 10}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_upgraded", "f2": 11}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_upgraded", "f2": 12}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_upgraded", "f2": 13}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v5_upgraded", "f2": 14}' - - - do: - search: - index: test_index - - - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs - ---- -"Get indexed scroll and execute scroll": +"Continue scroll after upgrade": - do: get: index: scroll_index diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml index 
9c709748391..46ade4823a2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml @@ -5,7 +5,7 @@ Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s - match: { timed_out: false } @@ -22,4 +22,3 @@ - match: { native_role.cluster.0: "all" } - match: { native_role.indices.0.names.0: "test_index" } - match: { native_role.indices.0.privileges.0: "all" } - diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml index 9520e954d7b..91d29457289 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -2,7 +2,7 @@ setup: - do: cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index ed6a66ae1a5..6b4c963dd53 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -2,7 +2,7 @@ setup: - do: cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we 
give delayed unassigned shards to stop being delayed timeout: 70s @@ -97,4 +97,3 @@ setup: xpack.ml.delete_job: job_id: mixed-cluster-datafeed-job - match: { acknowledged: true } - From 45537c59e59287216099138f14cf58c5416503d0 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Wed, 6 Jun 2018 10:05:32 -0700 Subject: [PATCH 22/22] [DOCS] Moves X-Pack settings to docs folder (#31120) --- docs/reference/index.asciidoc | 2 +- .../reference}/settings/audit-settings.asciidoc | 0 .../reference}/settings/configuring-xes.asciidoc | 0 .../settings/images/monitoring-es-cgroup-true.png | Bin .../reference}/settings/license-settings.asciidoc | 0 .../reference}/settings/ml-settings.asciidoc | 0 .../settings/monitoring-settings.asciidoc | 0 .../settings/notification-settings.asciidoc | 0 .../reference}/settings/security-settings.asciidoc | 0 .../reference}/settings/sql-settings.asciidoc | 0 .../reference}/settings/ssl-settings.asciidoc | 0 .../en/monitoring/configuring-monitoring.asciidoc | 2 +- x-pack/docs/en/security/configuring-es.asciidoc | 4 ++-- 13 files changed, 4 insertions(+), 4 deletions(-) rename {x-pack/docs/en => docs/reference}/settings/audit-settings.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/settings/configuring-xes.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/settings/images/monitoring-es-cgroup-true.png (100%) rename {x-pack/docs/en => docs/reference}/settings/license-settings.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/settings/ml-settings.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/settings/monitoring-settings.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/settings/notification-settings.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/settings/security-settings.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/settings/sql-settings.asciidoc (100%) rename {x-pack/docs/en => docs/reference}/settings/ssl-settings.asciidoc (100%) diff --git a/docs/reference/index.asciidoc 
b/docs/reference/index.asciidoc index 11006d38976..17cdde32b07 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -22,7 +22,7 @@ include::{xes-repo-dir}/security/configuring-es.asciidoc[] include::{xes-repo-dir}/setup/setup-xclient.asciidoc[] -include::{xes-repo-dir}/settings/configuring-xes.asciidoc[] +include::settings/configuring-xes.asciidoc[] include::{xes-repo-dir}/setup/bootstrap-checks-xes.asciidoc[] diff --git a/x-pack/docs/en/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/audit-settings.asciidoc rename to docs/reference/settings/audit-settings.asciidoc diff --git a/x-pack/docs/en/settings/configuring-xes.asciidoc b/docs/reference/settings/configuring-xes.asciidoc similarity index 100% rename from x-pack/docs/en/settings/configuring-xes.asciidoc rename to docs/reference/settings/configuring-xes.asciidoc diff --git a/x-pack/docs/en/settings/images/monitoring-es-cgroup-true.png b/docs/reference/settings/images/monitoring-es-cgroup-true.png similarity index 100% rename from x-pack/docs/en/settings/images/monitoring-es-cgroup-true.png rename to docs/reference/settings/images/monitoring-es-cgroup-true.png diff --git a/x-pack/docs/en/settings/license-settings.asciidoc b/docs/reference/settings/license-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/license-settings.asciidoc rename to docs/reference/settings/license-settings.asciidoc diff --git a/x-pack/docs/en/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/ml-settings.asciidoc rename to docs/reference/settings/ml-settings.asciidoc diff --git a/x-pack/docs/en/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/monitoring-settings.asciidoc rename to 
docs/reference/settings/monitoring-settings.asciidoc diff --git a/x-pack/docs/en/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/notification-settings.asciidoc rename to docs/reference/settings/notification-settings.asciidoc diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/security-settings.asciidoc rename to docs/reference/settings/security-settings.asciidoc diff --git a/x-pack/docs/en/settings/sql-settings.asciidoc b/docs/reference/settings/sql-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/sql-settings.asciidoc rename to docs/reference/settings/sql-settings.asciidoc diff --git a/x-pack/docs/en/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/ssl-settings.asciidoc rename to docs/reference/settings/ssl-settings.asciidoc diff --git a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc b/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc index 99c69eeea8a..1712c88380b 100644 --- a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc +++ b/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc @@ -142,4 +142,4 @@ stored, that is to say the monitoring cluster. To grant all of the necessary per <>. 
include::indices.asciidoc[] -include::{xes-repo-dir}/settings/monitoring-settings.asciidoc[] \ No newline at end of file +include::{es-repo-dir}/settings/monitoring-settings.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index d8ef6c2809b..5e8f1adbc7a 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -142,5 +142,5 @@ include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::authentication/configuring-saml-realm.asciidoc[] -include::{xes-repo-dir}/settings/security-settings.asciidoc[] -include::{xes-repo-dir}/settings/audit-settings.asciidoc[] +include::{es-repo-dir}/settings/security-settings.asciidoc[] +include::{es-repo-dir}/settings/audit-settings.asciidoc[]