From 4e9bf3f18a540be97c4261f5176d93ba02f96a08 Mon Sep 17 00:00:00 2001 From: Tomas Della Vedova Date: Mon, 20 May 2019 06:40:45 -0400 Subject: [PATCH 01/25] Remove deprecated _source_exclude and _source_include from get API spec (#42188) Support for these parameters was removed in #35097. The spec were left outdated. --- .../src/main/resources/rest-api-spec/api/get.json | 8 -------- 1 file changed, 8 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json index a6d77ec811b..f4e0fdd5f90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/get.json @@ -61,14 +61,6 @@ "type" : "list", "description" : "A list of fields to extract and return from the _source field" }, - "_source_exclude": { - "type" : "list", - "description" : "A list of fields to exclude from the returned _source field" - }, - "_source_include": { - "type" : "list", - "description" : "A list of fields to extract and return from the _source field" - }, "version" : { "type" : "number", "description" : "Explicit version number for concurrency control" From fd2d4d761b5724b1f64e1b5be71d47ade04de6b1 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Mon, 20 May 2019 09:06:42 -0400 Subject: [PATCH 02/25] [DOCS] Updates TLS configuration info (#41983) --- .../configuring-tls-docker.asciidoc | 6 ++---- .../securing-elasticsearch.asciidoc | 4 ++-- .../setting-up-ssl.asciidoc | 15 +++++++-------- .../reference/setup/bootstrap-checks-xes.asciidoc | 5 ++--- .../en/security/securing-communications.asciidoc | 3 +-- 5 files changed, 14 insertions(+), 19 deletions(-) diff --git a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc index 2bc2300174e..1d23430e37e 100644 --- a/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc +++ b/docs/reference/security/securing-communications/configuring-tls-docker.asciidoc @@ -2,10 +2,8 @@ [[configuring-tls-docker]] === Encrypting communications in an {es} Docker Container -Starting with version 6.0.0, {stack} {security-features} -(Gold, Platinum or Enterprise subscriptions) -https://www.elastic.co/guide/en/elasticsearch/reference/6.0/breaking-6.0.0-xes.html[require SSL/TLS] -encryption for the transport networking layer. +Unless you are using a trial license, {stack} {security-features} require +SSL/TLS encryption for the transport networking layer. This section demonstrates an easy path to get started with SSL/TLS for both HTTPS and transport using the {es} Docker image. The example uses diff --git a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc index 9d207f26a96..a24e272dd89 100644 --- a/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc +++ b/docs/reference/security/securing-communications/securing-elasticsearch.asciidoc @@ -7,8 +7,8 @@ your {es} cluster. Connections are secured using Transport Layer Security (TLS/SSL). WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables -{security-features}. +including passwords. If the {es} {security-features} are enabled, unless you +have a trial license, you must configure SSL/TLS for internode-communication. To enable encryption, you need to perform the following steps on each node in the cluster: diff --git a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc index 90f9b040d9d..68eda2cdc3e 100644 --- a/docs/reference/security/securing-communications/setting-up-ssl.asciidoc +++ b/docs/reference/security/securing-communications/setting-up-ssl.asciidoc @@ -1,16 +1,15 @@ [[ssl-tls]] -=== Setting Up TLS on a cluster +=== Setting up TLS on a cluster -The {stack} {security-features} enables you to encrypt traffic to, from, and +The {stack} {security-features} enable you to encrypt traffic to, from, and within your {es} cluster. Connections are secured using Transport Layer Security (TLS), which is commonly referred to as "SSL". WARNING: Clusters that do not have encryption enabled send all data in plain text -including passwords and will not be able to install a license that enables -{security-features}. +including passwords. If the {es} {security-features} are enabled, unless you have a trial license, you must configure SSL/TLS for internode-communication. The following steps describe how to enable encryption across the various -components of the Elastic Stack. You must perform each of the steps that are +components of the {stack}. You must perform each of the steps that are applicable to your cluster. . Generate a private key and X.509 certificate for each of your {es} nodes. See @@ -22,14 +21,14 @@ enable TLS on the HTTP layer. See {ref}/configuring-tls.html#tls-transport[Encrypting Communications Between Nodes in a Cluster] and {ref}/configuring-tls.html#tls-http[Encrypting HTTP Client Communications]. -. Configure {monitoring} to use encrypted connections. See <>. +. Configure the {monitor-features} to use encrypted connections. See <>. . Configure {kib} to encrypt communications between the browser and the {kib} server and to connect to {es} via HTTPS. See -{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. +{kibana-ref}/using-kibana-with-security.html[Configuring security in {kib}]. . Configure Logstash to use TLS encryption. See -{logstash-ref}/ls-security.html[Configuring Security in Logstash]. +{logstash-ref}/ls-security.html[Configuring security in {ls}]. . Configure Beats to use encrypted connections. See <>. diff --git a/docs/reference/setup/bootstrap-checks-xes.asciidoc b/docs/reference/setup/bootstrap-checks-xes.asciidoc index df020bbd962..37c90e9f4d9 100644 --- a/docs/reference/setup/bootstrap-checks-xes.asciidoc +++ b/docs/reference/setup/bootstrap-checks-xes.asciidoc @@ -53,9 +53,8 @@ must also be valid. === SSL/TLS check //See TLSLicenseBootstrapCheck.java -In 6.0 and later releases, if you have a gold, platinum, or enterprise license -and {es} {security-features} are enabled, you must configure SSL/TLS for -internode-communication. +If you enable {es} {security-features}, unless you have a trial license, you +must configure SSL/TLS for internode-communication. NOTE: Single-node clusters that use a loopback interface do not have this requirement. For more information, see diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index 63fded729eb..6672c031649 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -5,8 +5,7 @@ Elasticsearch nodes store data that may be confidential. Attacks on the data may come from the network. These attacks could include sniffing of the data, manipulation of the data, and attempts to gain access to the server and thus the -files storing the data. Securing your nodes is required in order to use a production -license that enables {security-features} and helps reduce the risk from +files storing the data. Securing your nodes helps reduce the risk from network-based attacks. This section shows how to: From dbbdcea128af746d94d4b5a246026929d2b5bca2 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Mon, 20 May 2019 09:45:36 -0400 Subject: [PATCH 03/25] Update ciphers for TLSv1.3 and JDK11 if available (#42082) This commit updates the default ciphers and TLS protocols that are used when the runtime JDK supports them. New cipher support has been introduced in JDK 11 and 12 along with performance fixes for AES GCM. The ciphers are ordered with PFS ciphers being most preferred, then AEAD ciphers, and finally those with mainstream hardware support. When available stronger encryption is preferred for a given cipher. This is a backport of #41385 and #41808. There are known JDK bugs with TLSv1.3 that have been fixed in various versions. These are: 1. The JDK's bundled HttpsServer will endless loop under JDK11 and JDK 12.0 (Fixed in 12.0.1) based on the way the Apache HttpClient performs a close (half close). 2. In all versions of JDK 11 and 12, the HttpsServer will endless loop when certificates are not trusted or another handshake error occurs. An email has been sent to the openjdk security-dev list and #38646 is open to track this. 3. In JDK 11.0.2 and prior there is a race condition with session resumption that leads to handshake errors when multiple concurrent handshakes are going on between the same client and server. This bug does not appear when client authentication is in use. This is JDK-8213202, which was fixed in 11.0.3 and 12.0. 4. In JDK 11.0.2 and prior there is a bug where resumed TLS sessions do not retain peer certificate information. This is JDK-8212885. The way these issues are addressed is that the current java version is checked and used to determine the supported protocols for tests that provoke these issues. --- .../client/RestClientBuilderIntegTests.java | 37 +++++- .../settings/security-settings.asciidoc | 31 +++-- .../common/ssl/SslConfigurationLoader.java | 71 +++++++---- .../reindex/ReindexRestClientSslTests.java | 4 + .../AzureDiscoveryClusterFormationTests.java | 24 +++- .../xpack/core/XPackSettings.java | 83 +++++++++---- .../transport/ssl/certs/simple/testclient.jks | Bin 3358 -> 3893 bytes .../exporter/http/HttpExporterSslIT.java | 27 +++++ .../security/transport/nio/SSLDriver.java | 2 +- .../test/SecuritySettingsSource.java | 6 +- .../authc/pki/PkiAuthenticationTests.java | 9 +- .../security/authc/saml/SamlRealmTests.java | 25 ++++ ...stractSimpleSecurityTransportTestCase.java | 4 +- ...ServerTransportFilterIntegrationTests.java | 9 +- .../transport/ssl/EllipticCurveSSLTests.java | 5 +- .../transport/ssl/SslMultiPortTests.java | 51 ++++++-- .../xpack/ssl/SSLClientAuthTests.java | 41 ++++++- .../xpack/ssl/SSLReloadIntegTests.java | 4 + .../xpack/ssl/SSLTrustRestrictionsTests.java | 21 +++- .../ssl/certs/simple/README.asciidoc | 113 ++++++++++++++++++ .../transport/ssl/certs/simple/testclient.jks | Bin 3358 -> 3893 bytes .../transport/ssl/certs/simple/testnode.jks | Bin 7414 -> 9360 bytes .../transport/ssl/certs/simple/testnode.p12 | Bin 2654 -> 2699 bytes .../ssl/certs/simple/testnode_ec.crt | 13 ++ .../webhook/WebhookHttpsIntegrationTests.java | 27 +++++ .../watcher/common/http/HttpClientTests.java | 30 +++++ .../reindex-tests-with-security/build.gradle | 6 + 27 files changed, 554 insertions(+), 89 deletions(-) create mode 100644 x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java index 49eefc527ba..780cc447ba8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderIntegTests.java @@ -38,8 +38,10 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.file.Files; import java.nio.file.Paths; +import java.security.AccessController; import java.security.KeyFactory; import java.security.KeyStore; +import java.security.PrivilegedAction; import java.security.cert.Certificate; import java.security.cert.CertificateFactory; import java.security.spec.PKCS8EncodedKeySpec; @@ -106,7 +108,7 @@ public class RestClientBuilderIntegTests extends RestClientTestCase { } private static SSLContext getSslContext() throws Exception { - SSLContext sslContext = SSLContext.getInstance("TLS"); + SSLContext sslContext = SSLContext.getInstance(getProtocol()); try (InputStream certFile = RestClientBuilderIntegTests.class.getResourceAsStream("/test.crt")) { // Build a keystore of default type programmatically since we can't use JKS keystores to // init a KeyManagerFactory in FIPS 140 JVMs. @@ -126,4 +128,37 @@ public class RestClientBuilderIntegTests extends RestClientTestCase { } return sslContext; } + + /** + * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK that supports TLSv1.3 prior to + * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK. + */ + private static String getProtocol() { + String version = AccessController.doPrivileged((PrivilegedAction) () -> System.getProperty("java.version")); + String[] components = version.split("\\."); + if (components.length > 0) { + final int major = Integer.valueOf(components[0]); + if (major < 11) { + return "TLS"; + } if (major > 12) { + return "TLS"; + } else if (major == 12 && components.length > 2) { + final int minor = Integer.valueOf(components[1]); + if (minor > 0) { + return "TLS"; + } else { + String patch = components[2]; + final int index = patch.indexOf("_"); + if (index > -1) { + patch = patch.substring(0, index); + } + + if (Integer.valueOf(patch) >= 1) { + return "TLS"; + } + } + } + } + return "TLSv1.2"; + } } diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 426605f63d7..4185cce13c3 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -1526,13 +1526,30 @@ Controls the verification of certificates. Valid values are: The default value is `full`. `*.ssl.cipher_suites`:: -Supported cipher suites can be found in Oracle's http://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html[ -Java Cryptography Architecture documentation]. Defaults to `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`, -`TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`, `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`, -`TLS_RSA_WITH_AES_128_CBC_SHA256`, `TLS_RSA_WITH_AES_128_CBC_SHA`. If the _Java Cryptography Extension (JCE) Unlimited Strength -Jurisdiction Policy Files_ has been installed, the default value also includes `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384`, -`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384`, `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`, `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`, -`TLS_RSA_WITH_AES_256_CBC_SHA256`, `TLS_RSA_WITH_AES_256_CBC_SHA`. +Supported cipher suites can be found in Oracle's +https://docs.oracle.com/en/java/javase/11/security/oracle-providers.html#GUID-7093246A-31A3-4304-AC5F-5FB6400405E2[Java + Cryptography Architecture documentation]. +Defaults to `TLS_AES_256_GCM_SHA384`, `TLS_AES_128_GCM_SHA256`, +`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`, `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`, +`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`, `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`, +`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`, +`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384`, `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`, +`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`, `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`, +`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`, `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`, +`TLS_RSA_WITH_AES_256_GCM_SHA384`, `TLS_RSA_WITH_AES_128_GCM_SHA256`, +`TLS_RSA_WITH_AES_256_CBC_SHA256`, `TLS_RSA_WITH_AES_128_CBC_SHA256`, +`TLS_RSA_WITH_AES_256_CBC_SHA`, `TLS_RSA_WITH_AES_128_CBC_SHA`. ++ +-- +NOTE: The default cipher suites list above includes TLSv1.3 ciphers and ciphers +that require the _Java Cryptography Extension (JCE) Unlimited Strength +Jurisdiction Policy Files_ for 256-bit AES encryption. If TLSv1.3 is not +available, the TLSv1.3 ciphers TLS_AES_256_GCM_SHA384`, `TLS_AES_128_GCM_SHA256` +will not be included in the default list. If 256-bit AES is unavailable, ciphers +with `AES_256` in their names wil not be included in the default list. Finally, +AES GCM has known performance issues in Java versions prior to 11 and will only +be included in the default list when using Java 11 or above. +-- [float] [[tls-ssl-key-settings]] diff --git a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java index 6e511565a9f..e53600c2514 100644 --- a/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java +++ b/libs/ssl-config/src/main/java/org/elasticsearch/common/ssl/SslConfigurationLoader.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.ssl; +import org.elasticsearch.bootstrap.JavaVersion; + import javax.crypto.Cipher; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.TrustManagerFactory; @@ -338,30 +340,53 @@ public abstract class SslConfigurationLoader { } private static List loadDefaultCiphers() { - final List ciphers128 = Arrays.asList( - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - "TLS_RSA_WITH_AES_128_CBC_SHA256", - "TLS_RSA_WITH_AES_128_CBC_SHA" - ); - final List ciphers256 = Arrays.asList( - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - "TLS_RSA_WITH_AES_256_CBC_SHA256", - "TLS_RSA_WITH_AES_256_CBC_SHA" - ); - if (has256BitAES()) { - List ciphers = new ArrayList<>(ciphers256.size() + ciphers128.size()); - ciphers.addAll(ciphers256); - ciphers.addAll(ciphers128); - return ciphers; - } else { - return ciphers128; + final boolean has256BitAES = has256BitAES(); + final boolean useGCM = JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0; + final boolean tlsV13Supported = DEFAULT_PROTOCOLS.contains("TLSv1.3"); + List ciphers = new ArrayList<>(); + if (tlsV13Supported) { // TLSv1.3 cipher has PFS, AEAD, hardware support + if (has256BitAES) { + ciphers.add("TLS_AES_256_GCM_SHA384"); + } + ciphers.add("TLS_AES_128_GCM_SHA256"); } + if (useGCM) { // PFS, AEAD, hardware support + if (has256BitAES) { + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")); + } else { + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")); + } + } + + // PFS, hardware support + if (has256BitAES) { + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA")); + } else { + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA")); + } + + // AEAD, hardware support + if (useGCM) { + if (has256BitAES) { + ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256")); + } else { + ciphers.add("TLS_RSA_WITH_AES_128_GCM_SHA256"); + } + } + + // hardware support + if (has256BitAES) { + ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA")); + } else { + ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA")); + } + return ciphers; } private static boolean has256BitAES() { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java index 7c94b94bbb1..a2e00e66fd2 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexRestClientSslTests.java @@ -120,6 +120,7 @@ public class ReindexRestClientSslTests extends ESTestCase { final List threads = new ArrayList<>(); final Settings settings = Settings.builder() .put("path.home", createTempDir()) + .put("reindex.ssl.supported_protocols", "TLSv1.2") .build(); final Environment environment = TestEnvironment.newEnvironment(settings); final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class)); @@ -134,6 +135,7 @@ public class ReindexRestClientSslTests extends ESTestCase { final Settings settings = Settings.builder() .put("path.home", createTempDir()) .putList("reindex.ssl.certificate_authorities", ca.toString()) + .put("reindex.ssl.supported_protocols", "TLSv1.2") .build(); final Environment environment = TestEnvironment.newEnvironment(settings); final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class)); @@ -149,6 +151,7 @@ public class ReindexRestClientSslTests extends ESTestCase { final Settings settings = Settings.builder() .put("path.home", createTempDir()) .put("reindex.ssl.verification_mode", "NONE") + .put("reindex.ssl.supported_protocols", "TLSv1.2") .build(); final Environment environment = TestEnvironment.newEnvironment(settings); final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class)); @@ -169,6 +172,7 @@ public class ReindexRestClientSslTests extends ESTestCase { .put("reindex.ssl.certificate", cert) .put("reindex.ssl.key", key) .put("reindex.ssl.key_passphrase", "client-password") + .put("reindex.ssl.supported_protocols", "TLSv1.2") .build(); AtomicReference clientCertificates = new AtomicReference<>(); handler = https -> { diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index 1ee2e249072..8bfb373f644 100644 --- a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -25,6 +25,7 @@ import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.FileSystemUtils; @@ -59,7 +60,9 @@ import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; import java.security.KeyStore; +import java.security.PrivilegedAction; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -262,11 +265,30 @@ public class AzureDiscoveryClusterFormationTests extends ESIntegTestCase { kmf.init(ks, passphrase); TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509"); tmf.init(ks); - SSLContext ssl = SSLContext.getInstance("TLS"); + SSLContext ssl = SSLContext.getInstance(getProtocol()); ssl.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null); return ssl; } + /** + * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to + * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK + */ + private static String getProtocol() { + if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { + return "TLS"; + } else if (JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0) { + return "TLSv1.2"; + } else { + JavaVersion full = + AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); + if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { + return "TLSv1.2"; + } + } + return "TLS"; + } + @AfterClass public static void stopHttpd() throws IOException { for (int i = 0; i < internalCluster().size(); i++) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 0eeb173b8b8..4f6202f2b5c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core; import org.apache.logging.log4j.LogManager; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.xpack.core.security.SecurityField; @@ -118,6 +119,20 @@ public class XPackSettings { /** Setting for enabling or disabling sql. Defaults to true. */ public static final Setting SQL_ENABLED = Setting.boolSetting("xpack.sql.enabled", true, Setting.Property.NodeScope); + public static final List DEFAULT_SUPPORTED_PROTOCOLS; + + static { + boolean supportsTLSv13 = false; + try { + SSLContext.getInstance("TLSv1.3"); + supportsTLSv13 = true; + } catch (NoSuchAlgorithmException e) { + LogManager.getLogger(XPackSettings.class).debug("TLSv1.3 is not supported", e); + } + DEFAULT_SUPPORTED_PROTOCOLS = supportsTLSv13 ? + Arrays.asList("TLSv1.3", "TLSv1.2", "TLSv1.1") : Arrays.asList("TLSv1.2", "TLSv1.1"); + } + /* * SSL settings. These are the settings that are specifically registered for SSL. Many are private as we do not explicitly use them * but instead parse based on a prefix (eg *.ssl.*) @@ -125,24 +140,58 @@ public class XPackSettings { public static final List DEFAULT_CIPHERS; static { - List ciphers = Arrays.asList("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA256", - "TLS_RSA_WITH_AES_128_CBC_SHA"); + List ciphers = new ArrayList<>(); + final boolean useGCM = JavaVersion.current().compareTo(JavaVersion.parse("11")) >= 0; + final boolean tlsV13Supported = DEFAULT_SUPPORTED_PROTOCOLS.contains("TLSv1.3"); try { final boolean use256Bit = Cipher.getMaxAllowedKeyLength("AES") > 128; + if (tlsV13Supported) { // TLSv1.3 cipher has PFS, AEAD, hardware support + if (use256Bit) { + ciphers.add("TLS_AES_256_GCM_SHA384"); + } + ciphers.add("TLS_AES_128_GCM_SHA256"); + } + if (useGCM) { // PFS, AEAD, hardware support + if (use256Bit) { + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")); + } else { + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")); + } + } + + // PFS, hardware support if (use256Bit) { - List strongerCiphers = new ArrayList<>(ciphers.size() * 2); - strongerCiphers.addAll(Arrays.asList("TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", - "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_256_CBC_SHA")); - strongerCiphers.addAll(ciphers); - ciphers = strongerCiphers; + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA")); + } else { + ciphers.addAll(Arrays.asList("TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA")); + } + + // AEAD, hardware support + if (useGCM) { + if (use256Bit) { + ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_GCM_SHA256")); + } else { + ciphers.add("TLS_RSA_WITH_AES_128_GCM_SHA256"); + } + } + + // hardware support + if (use256Bit) { + ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_256_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_CBC_SHA")); + } else { + ciphers.addAll(Arrays.asList("TLS_RSA_WITH_AES_128_CBC_SHA256", "TLS_RSA_WITH_AES_128_CBC_SHA")); } } catch (NoSuchAlgorithmException e) { // ignore it here - there will be issues elsewhere and its not nice to throw in a static initializer } - DEFAULT_CIPHERS = ciphers; + DEFAULT_CIPHERS = Collections.unmodifiableList(ciphers); } /* @@ -164,20 +213,6 @@ public class XPackSettings { } }, Setting.Property.NodeScope); - public static final List DEFAULT_SUPPORTED_PROTOCOLS; - - static { - boolean supportsTLSv13 = false; - try { - SSLContext.getInstance("TLSv1.3"); - supportsTLSv13 = true; - } catch (NoSuchAlgorithmException e) { - LogManager.getLogger(XPackSettings.class).debug("TLSv1.3 is not supported", e); - } - DEFAULT_SUPPORTED_PROTOCOLS = supportsTLSv13 ? - Arrays.asList("TLSv1.3", "TLSv1.2", "TLSv1.1") : Arrays.asList("TLSv1.2", "TLSv1.1"); - } - public static final SSLClientAuth CLIENT_AUTH_DEFAULT = SSLClientAuth.REQUIRED; public static final SSLClientAuth HTTP_CLIENT_AUTH_DEFAULT = SSLClientAuth.NONE; public static final VerificationMode VERIFICATION_MODE_DEFAULT = VerificationMode.FULL; diff --git a/x-pack/plugin/core/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks b/x-pack/plugin/core/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks index d6dc21c1bd5ff4a248bfba5a171518cd649e278d..4f897d80f4af494c8fffc0acab1cc1365fe28353 100644 GIT binary patch delta 375 zcmbOywN;Mi-`jt085kItfS7qB&p#e_Fo(M&wYVfNKP5FjHJO2dF)J%guato`LeJE| z63G2*(8Ty2h$k;#W@2PwVkyc{ur}af{g@iKQ3k4>IoV_Ez>XMNQ!yR}OW zR4#7ZV$itJK$eX;RF;oLj77wkA$j+^R7L3}6ZW)T@ZA+7&KZAmat{0CY!ufsdoUQd zGAX1N>6fKh>V!uhYYp1GD)iQ+%Afw`cQo=YKP$HD`>hztq{wjbcwlJP)x0xOS%*ZX t{hXD(J;mED(&^DTMh-4FK6#~Uvvt=vNToT>FF1T~kF;@=kk getProtocols() { + if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } else if (JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0) { + return Collections.singletonList("TLSv1.2"); + } else { + JavaVersion full = + AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); + if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { + return Collections.singletonList("TLSv1.2"); + } + } + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java index e54bc9fa16e..c5dcb260919 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java @@ -438,7 +438,7 @@ public class SSLDriver implements AutoCloseable { SSLEngineResult result = unwrap(encryptedBuffer, applicationBuffer); boolean renegotiationRequested = result.getStatus() != SSLEngineResult.Status.CLOSED && maybeRenegotiation(result.getHandshakeStatus()); - continueUnwrap = result.bytesProduced() > 0 && renegotiationRequested == false; + continueUnwrap = result.bytesConsumed() > 0 && renegotiationRequested == false; } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java index a8ad2fbd4aa..491fd66c453 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecuritySettingsSource.java @@ -218,7 +218,8 @@ public class SecuritySettingsSource extends NodeConfigurationSource { "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/active-directory-ca.crt", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/openldap.crt", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"), + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt"), hostnameVerificationEnabled, false); } @@ -244,7 +245,8 @@ public class SecuritySettingsSource extends NodeConfigurationSource { "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.pem", "testclient", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt", Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"), + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt"), hostnameVerificationEnabled, true); } else { addSSLSettingsForStore(builder, prefix, "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks", diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java index 52c87c75a13..03d107a50a8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiAuthenticationTests.java @@ -66,8 +66,9 @@ public class PkiAuthenticationTests extends SecuritySingleNodeTestCase { .put("xpack.security.http.ssl.client_authentication", sslClientAuth) .put("xpack.security.authc.realms.file.file.order", "0") .put("xpack.security.authc.realms.pki.pki1.order", "1") - .put("xpack.security.authc.realms.pki.pki1.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .putList("xpack.security.authc.realms.pki.pki1.certificate_authorities", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt").toString(), + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt").toString()) .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")); return builder.build(); } @@ -91,8 +92,8 @@ public class PkiAuthenticationTests extends SecuritySingleNodeTestCase { "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", "testnode", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - Arrays.asList - ("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); try (TransportClient client = createTransportClient(builder.build())) { client.addTransportAddress(randomFrom(node().injector().getInstance(Transport.class).boundAddress().boundAddresses())); IndexResponse response = client.prepareIndex("foo", "bar").setSource("pki", "auth").get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java index d139d99bf9c..aea50691119 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java @@ -5,9 +5,11 @@ */ package org.elasticsearch.xpack.security.authc.saml; +import com.sun.net.httpserver.HttpsServer; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; @@ -19,6 +21,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; @@ -52,8 +55,10 @@ import java.io.OutputStream; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; import java.security.KeyStore; import java.security.PrivateKey; +import java.security.PrivilegedAction; import java.security.PrivilegedActionException; import java.security.PublicKey; import java.security.cert.Certificate; @@ -131,6 +136,7 @@ public class SamlRealmTests extends SamlTestCase { getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) .put("xpack.security.http.ssl.certificate_authorities", getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .putList("xpack.security.http.ssl.supported_protocols", getProtocols()) .put("path.home", createTempDir()) .setSecureSettings(mockSecureSettings) .build(); @@ -715,4 +721,23 @@ public class SamlRealmTests extends SamlTestCase { assertEquals(SAMLConstants.SAML2_POST_BINDING_URI, ssoServices.get(0).getBinding()); assertEquals(SAMLConstants.SAML2_REDIRECT_BINDING_URI, ssoServices.get(1).getBinding()); } + + /** + * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to + * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK + */ + private static List getProtocols() { + if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } else if (JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0) { + return Collections.singletonList("TLSv1.2"); + } else { + JavaVersion full = + AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); + if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { + return Collections.singletonList("TLSv1.2"); + } + } + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java index ca548251d37..1b7a7f8d870 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/AbstractSimpleSecurityTransportTestCase.java @@ -155,7 +155,9 @@ public abstract class AbstractSimpleSecurityTransportTestCase extends AbstractSi @SuppressForbidden(reason = "Need to open socket connection") public void testRenegotiation() throws Exception { - SSLService sslService = createSSLService(); + // force TLSv1.2 since renegotiation is not supported by 1.3 + SSLService sslService = + createSSLService(Settings.builder().put("xpack.security.transport.ssl.supported_protocols", "TLSv1.2").build()); final SSLConfiguration sslConfiguration = sslService.getSSLConfiguration("xpack.security.transport.ssl"); SocketFactory factory = sslService.sslSocketFactory(sslConfiguration); try (SSLSocket socket = (SSLSocket) factory.createSocket()) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java index f8bf1f47a38..e3f561c4711 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterIntegrationTests.java @@ -19,7 +19,6 @@ import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.SecurityIntegTestCase; -import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectionProfile; @@ -80,8 +79,6 @@ public class ServerTransportFilterIntegrationTests extends SecurityIntegTestCase settingsBuilder.put("transport.profiles.default.xpack.security.type", "node"); // this is default lets set it randomly } - SecuritySettingsSource.addSecureSettings(settingsBuilder, secureSettings -> - secureSettings.setString("transport.profiles.client.xpack.security.ssl.keystore.secure_password", "testnode")); return settingsBuilder.build(); } @@ -112,7 +109,8 @@ public class ServerTransportFilterIntegrationTests extends SecurityIntegTestCase "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", "testnode", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); try (Node node = new MockNode(nodeSettings.build(), mockPlugins)) { node.start(); ensureStableCluster(cluster().size() + 1); @@ -151,7 +149,8 @@ public class ServerTransportFilterIntegrationTests extends SecurityIntegTestCase "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem", "testnode", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", - Collections.singletonList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); try (Node node = new MockNode(nodeSettings.build(), mockPlugins)) { node.start(); TransportService instance = node.injector().getInstance(TransportService.class); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java index 5f0f3c94e36..761726c5f67 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/EllipticCurveSSLTests.java @@ -33,7 +33,9 @@ import java.util.Collections; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; public class EllipticCurveSSLTests extends SecurityIntegTestCase { @@ -106,7 +108,8 @@ public class EllipticCurveSSLTests extends SecurityIntegTestCase { Certificate[] peerChain = session.getPeerCertificates(); assertEquals(1, peerChain.length); assertEquals(certs[0], peerChain[0]); - assertThat(session.getCipherSuite(), containsString("ECDSA")); + assertThat(session.getCipherSuite(), + anyOf(containsString("ECDSA"), equalTo("TLS_AES_256_GCM_SHA384"), equalTo("TLS_AES_128_GCM_SHA256"))); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java index 04c1ff03f82..f12304bd885 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ssl/SslMultiPortTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.transport.ssl; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.network.NetworkAddress; @@ -13,6 +14,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.transport.Transport; import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.ssl.SSLClientAuth; import org.elasticsearch.xpack.security.LocalStateSecurity; @@ -21,8 +23,11 @@ import org.junit.BeforeClass; import java.net.InetAddress; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.Arrays; import java.util.Collections; +import java.util.List; import static org.elasticsearch.test.SecuritySettingsSource.TEST_USER_NAME; import static org.elasticsearch.test.SecuritySettingsSource.addSSLSettingsForNodePEMFiles; @@ -116,6 +121,7 @@ public class SslMultiPortTests extends SecurityIntegTestCase { try(TransportClient transportClient = new TestXPackTransportClient(Settings.builder() .put(transportClientSettings()) .put("xpack.security.transport.ssl.enabled", true) + .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) .put("node.name", "programmatic_transport_client") .put("cluster.name", internalCluster().getClusterName()) .build(), LocalStateSecurity.class)) { @@ -154,7 +160,8 @@ public class SslMultiPortTests extends SecurityIntegTestCase { "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", "testclient-client-profile", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); try (TransportClient transportClient = createTransportClient(builder.build())) { transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("client"))); assertGreenClusterState(transportClient); @@ -174,7 +181,9 @@ public class SslMultiPortTests extends SecurityIntegTestCase { "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", "testclient-client-profile", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); + builder.putList("xpack.security.transport.ssl.supported_protocols", getProtocols()); try (TransportClient transportClient = createTransportClient(builder.build())) { transportClient.addTransportAddress(new TransportAddress(localAddress, getProfilePort("no_client_auth"))); @@ -195,7 +204,8 @@ public class SslMultiPortTests extends SecurityIntegTestCase { "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem", "testclient-client-profile", "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt", - Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")); + Arrays.asList("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt", + "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt")); try (TransportClient transportClient = createTransportClient(builder.build())) { TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).boundAddress().boundAddresses()); transportClient.addTransportAddress(transportAddress); @@ -273,8 +283,10 @@ public class SslMultiPortTests extends SecurityIntegTestCase { .put(SecurityField.USER_SETTING.getKey(), TEST_USER_NAME + ":" + TEST_PASSWORD) .put("cluster.name", internalCluster().getClusterName()) .put("xpack.security.transport.ssl.enabled", true) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .putList("xpack.security.transport.ssl.certificate_authorities", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt").toString(), + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt").toString()) + .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) .build(); try (TransportClient transportClient = new TestXPackTransportClient(settings, Collections.singletonList(LocalStateSecurity.class))) { @@ -294,8 +306,10 @@ public class SslMultiPortTests extends SecurityIntegTestCase { .put("cluster.name", internalCluster().getClusterName()) .put("xpack.security.transport.ssl.enabled", true) .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .putList("xpack.security.transport.ssl.certificate_authorities", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt").toString(), + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt").toString()) + .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) .build(); try (TransportClient transportClient = new TestXPackTransportClient(settings, Collections.singletonList(LocalStateSecurity.class))) { @@ -318,8 +332,10 @@ public class SslMultiPortTests extends SecurityIntegTestCase { .put("cluster.name", internalCluster().getClusterName()) .put("xpack.security.transport.ssl.enabled", true) .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.REQUIRED) - .put("xpack.security.transport.ssl.certificate_authorities", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .putList("xpack.security.transport.ssl.certificate_authorities", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt").toString(), + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt").toString()) + .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) .build(); try (TransportClient transportClient = new TestXPackTransportClient(settings, Collections.singletonList(LocalStateSecurity.class))) { @@ -409,4 +425,21 @@ public class SslMultiPortTests extends SecurityIntegTestCase { throw new IllegalStateException("failed to find transport address equal to [" + NetworkAddress.format(localAddress) + "] " + " in the following bound addresses " + Arrays.toString(transportAddresses)); } + + /** + * TLSv1.3 when running in a JDK prior to 11.0.3 has a race condition when multiple simultaneous connections are established. See + * JDK-8213202. This issue is not triggered when using client authentication, which we do by default for transport connections. + * However if client authentication is turned off and TLSv1.3 is used on the affected JVMs then we will hit this issue. + */ + private static List getProtocols() { + if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } + JavaVersion full = + AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); + if (full.compareTo(JavaVersion.parse("11.0.3")) < 0) { + return Collections.singletonList("TLSv1.2"); + } + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java index 7075a677a26..85f18ddff92 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLClientAuthTests.java @@ -11,6 +11,7 @@ import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -23,6 +24,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.SecurityIntegTestCase; import org.elasticsearch.transport.Transport; import org.elasticsearch.xpack.core.TestXPackTransportClient; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; import org.elasticsearch.xpack.core.ssl.PemUtils; @@ -38,11 +40,14 @@ import java.io.InputStream; import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.security.SecureRandom; import java.security.cert.CertPathBuilderException; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; import static org.hamcrest.Matchers.containsString; @@ -101,7 +106,7 @@ public class SSLClientAuthTests extends SecurityIntegTestCase { return true; } - public void testThatHttpFailsWithoutSslClientAuth() throws IOException { + public void testThatHttpFailsWithoutSslClientAuth() { SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(SSLContexts.createDefault(), NoopHostnameVerifier.INSTANCE); try (RestClient restClient = createRestClient(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy), "https")) { restClient.performRequest(new Request("GET", "/")); @@ -130,11 +135,12 @@ public class SSLClientAuthTests extends SecurityIntegTestCase { } } - public void testThatTransportWorksWithoutSslClientAuth() throws IOException { + public void testThatTransportWorksWithoutSslClientAuth() { // specify an arbitrary key and certificate - not the certs needed to connect to the transport protocol Path keyPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.pem"); Path certPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient-client-profile.crt"); Path nodeCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"); + Path nodeECCertPath = getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt"); if (Files.notExists(keyPath) || Files.notExists(certPath)) { throw new ElasticsearchException("key or certificate path doesn't exist"); @@ -147,7 +153,8 @@ public class SSLClientAuthTests extends SecurityIntegTestCase { .put("xpack.security.transport.ssl.client_authentication", SSLClientAuth.NONE) .put("xpack.security.transport.ssl.key", keyPath) .put("xpack.security.transport.ssl.certificate", certPath) - .put("xpack.security.transport.ssl.certificate_authorities", nodeCertPath) + .putList("xpack.security.transport.ssl.supported_protocols", getProtocols()) + .putList("xpack.security.transport.ssl.certificate_authorities", nodeCertPath.toString(), nodeECCertPath.toString()) .setSecureSettings(secureSettings) .put("cluster.name", internalCluster().getClusterName()) .put(SecurityField.USER_SETTING.getKey(), transportClientUsername() + ":" + new String(transportClientPassword().getChars())) @@ -165,12 +172,19 @@ public class SSLClientAuthTests extends SecurityIntegTestCase { try { String certPath = "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"; String nodeCertPath = "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"; + String nodeEcCertPath = "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt"; String keyPath = "/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.pem"; TrustManager tm = CertParsingUtils.trustManager(CertParsingUtils.readCertificates(Arrays.asList(getDataPath - (certPath), getDataPath(nodeCertPath)))); + (certPath), getDataPath(nodeCertPath), getDataPath(nodeEcCertPath)))); KeyManager km = CertParsingUtils.keyManager(CertParsingUtils.readCertificates(Collections.singletonList(getDataPath (certPath))), PemUtils.readPrivateKey(getDataPath(keyPath), "testclient"::toCharArray), "testclient".toCharArray()); - SSLContext context = SSLContext.getInstance("TLSv1.2"); + + final SSLContext context; + if (XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS.contains("TLSv1.3")) { + context = SSLContext.getInstance(randomBoolean() ? "TLSv1.3" : "TLSv1.2"); + } else { + context = SSLContext.getInstance("TLSv1.2"); + } context.init(new KeyManager[] { km }, new TrustManager[] { tm }, new SecureRandom()); return context; } catch (Exception e) { @@ -188,4 +202,21 @@ public class SSLClientAuthTests extends SecurityIntegTestCase { } return baos.toByteArray(); } + + /** + * TLSv1.3 when running in a JDK prior to 11.0.3 has a race condition when multiple simultaneous connections are established. See + * JDK-8213202. This issue is not triggered when using client authentication, which we do by default for transport connections. + * However if client authentication is turned off and TLSv1.3 is used on the affected JVMs then we will hit this issue. + */ + private static List getProtocols() { + if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } + JavaVersion full = + AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); + if (full.compareTo(JavaVersion.parse("11.0.3")) < 0) { + return Collections.singletonList("TLSv1.2"); + } + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java index dd6985889d7..3eeaca1a3f1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLReloadIntegTests.java @@ -115,6 +115,10 @@ public class SSLReloadIntegTests extends SecurityIntegTestCase { try (SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(address.getAddress(), address.getPort())) { assertThat(socket.isConnected(), is(true)); socket.startHandshake(); + if (socket.getSession().getProtocol().equals("TLSv1.3")) { + // blocking read for TLSv1.3 to see if the other side closed the connection + socket.getInputStream().read(); + } fail("handshake should not have been successful!"); } catch (SSLException | SocketException expected) { logger.trace("expected exception", expected); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java index a89b8fcdd69..944c3306763 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -31,6 +31,7 @@ import javax.net.ssl.SSLSocket; import javax.net.ssl.SSLSocketFactory; import java.io.IOException; import java.net.SocketException; +import java.net.SocketTimeoutException; import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.Files; import java.nio.file.Path; @@ -165,7 +166,7 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { public void testCertificateWithTrustedNameIsAccepted() throws Exception { writeRestrictions("*.trusted"); try { - tryConnect(trustedCert); + tryConnect(trustedCert, false); } catch (SSLException | SocketException ex) { logger.warn(new ParameterizedMessage("unexpected handshake failure with certificate [{}] [{}]", trustedCert.certificate.getSubjectDN(), trustedCert.certificate.getSubjectAlternativeNames()), ex); @@ -176,7 +177,7 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { public void testCertificateWithUntrustedNameFails() throws Exception { writeRestrictions("*.trusted"); try { - tryConnect(untrustedCert); + tryConnect(untrustedCert, true); fail("handshake should have failed, but was successful"); } catch (SSLException | SocketException ex) { // expected @@ -187,7 +188,7 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { writeRestrictions("*"); assertBusy(() -> { try { - tryConnect(untrustedCert); + tryConnect(untrustedCert, false); } catch (SSLException | SocketException ex) { fail("handshake should have been successful, but failed with " + ex); } @@ -196,7 +197,7 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { writeRestrictions("*.trusted"); assertBusy(() -> { try { - tryConnect(untrustedCert); + tryConnect(untrustedCert, true); fail("handshake should have failed, but was successful"); } catch (SSLException | SocketException ex) { // expected @@ -221,7 +222,7 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { } } - private void tryConnect(CertificateInfo certificate) throws Exception { + private void tryConnect(CertificateInfo certificate, boolean shouldFail) throws Exception { Settings settings = Settings.builder() .put("path.home", createTempDir()) .put("xpack.security.transport.ssl.key", certificate.getKeyPath()) @@ -239,6 +240,16 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { assertThat(socket.isConnected(), is(true)); // The test simply relies on this (synchronously) connecting (or not), so we don't need a handshake handler socket.startHandshake(); + + // blocking read for TLSv1.3 to see if the other side closed the connection + if (socket.getSession().getProtocol().equals("TLSv1.3")) { + if (shouldFail) { + socket.getInputStream().read(); + } else { + socket.setSoTimeout(1000); // 1 second timeout + expectThrows(SocketTimeoutException.class, () -> socket.getInputStream().read()); + } + } } } diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/README.asciidoc b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/README.asciidoc index 5b2a6b737d7..0136e967106 100644 --- a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/README.asciidoc +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/README.asciidoc @@ -34,3 +34,116 @@ keytool -importkeystore -destkeystore .jks -srckeystore .p12 -srcsto The keystore is now created and has the private/public key pair. You can import additional trusted certificates using `keytool -importcert`. When doing so make sure to specify an alias so that others can recreate the keystore if necessary. + +=== Changes and additions for removing Bouncy Castle Dependency + +`testnode-unprotected.pem` is simply the decrypted `testnode.pem` +------ +openssl rsa -in testnode.pem -out testnode-unprotected.pem +------ + +`rsa_key_pkcs8_plain.pem` is the same plaintext key encoded in `PKCS#8` +------ +openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode-unprotected.pem -out rsa_key_pkcs8_plain.pem -nocrypt +------ + +`testnode-aes{128,192,256}.pem` is the testnode.pem private key, encrypted with `AES-128`, `AES-192` and `AES-256` +respectively, encoded in `PKCS#1` +[source,shell] +------ +openssl rsa -aes128 -in testnode-unprotected.pem -out testnode-aes128.pem +------ +[source,shell] +------ +openssl rsa -aes192 -in testnode-unprotected.pem -out testnode-aes192.pem +------ +[source,shell] +------ +openssl rsa -aes256 -in testnode-unprotected.pem -out testnode-aes256.pem +------ + +Adding `DSA` and `EC` Keys to the Keystore + +[source,shell] +------ +keytool -genkeypair -keyalg DSA -alias testnode_dsa -keystore testnode.jks -storepass testnode \ + -keypass testnode -validity 10000 -keysize 1024 -dname "CN=Elasticsearch Test Node" \ + -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1 +------ +[source,shell] +------ +keytool -genkeypair -keyalg EC -alias testnode_ec -keystore testnode.jks -storepass testnode \ + -keypass testnode -validity 10000 -keysize 256 -dname "CN=Elasticsearch Test Node" \ + -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1 +------ + +Exporting the `DSA` and `EC` private keys from the keystore + +[source,shell] +---- +keytool -importkeystore -srckeystore testnode.jks -destkeystore dsa.p12 -deststoretype PKCS12 \ + -srcalias testnode_dsa -deststorepass testnode -destkeypass testnode +---- +[source,shell] +---- +openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \ + -out dsa_key_pkcs8_plain.pem +---- +[source,shell] +---- +keytool -importkeystore -srckeystore testnode.jks -destkeystore ec.p12 -deststoretype PKCS12 \ + -srcalias testnode_ec -deststorepass testnode -destkeypass testnode +---- +[source,shell] +---- +openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \ + -out ec_key_pkcs8_plain.pem +---- + + + +Create `PKCS#8` encrypted key from the encrypted `PKCS#1` encoded `testnode.pem` +[source,shell] +----- +openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode.pem -out key_pkcs8_encrypted.pem +----- +[source,shell] +----- +ssh-keygen -t ed25519 -f key_unsupported.pem +----- + + +Convert `prime256v1-key-noparam.pem` to `PKCS#8` format +----- +openssl pkcs8 -topk8 -in prime256v1-key-noparam.pem -nocrypt -out prime256v1-key-noparam-pkcs8.pem +----- + +Generate the keys and self-signed certificates in `nodes/self/` : + +------ +openssl req -newkey rsa:2048 -keyout n1.c1.key -x509 -days 3650 -subj "/CN=n1.c1" -reqexts SAN \ + -extensions SAN -config <(cat /etc/ssl/openssl.cnf \ + <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node1.cluster1")) -out n1.c1.crt +------ + + +Create a `CA` keypair for testing +[source,shell] +----- +openssl req -newkey rsa:2048 -nodes -keyout ca.key -x509 -subj "/CN=certAuth" -days 10000 -out ca.crt +----- + +Generate Certificates signed with our CA for testing +[source,shell] +------ + openssl req -new -newkey rsa:2048 -keyout n2.c2.key -reqexts SAN -extensions SAN \ + -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ + -out n2.c2.csr +------ + +[source,shell] +------ +openssl x509 -req -in n2.c2.csr -extensions SAN -CA ca.crt -CAkey ca.key -CAcreateserial \ + -extfile <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ + -out n2.c2.crt -days 10000 +------ diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.jks index d6dc21c1bd5ff4a248bfba5a171518cd649e278d..495d43d69eb112dafb4c0067f8586b7da33f6d75 100644 GIT binary patch delta 375 zcmbOywN;Mi-`jt085kItfS7qB&p#e_Fo(M&wYVfNKP5FjHJO2dF)J%Y7nTeIbKxy(DPGv0;$`k69-BPF#kH6L-Ou-sfyA|ChTdw;JYhEoHPFB?`Y&*epYPP_ggWPNs-~=@xaiot9fUnvJQz% u`#CFndy2PRq|>8wj2v8UeDX@y>U3^?irYSK%A>pUG?gpcyTZS{@&*7CTZ7jC delta 42 wcmdlgH&2S^-`jt085kItKzJk1KOPZ7xA^DtD-_@6?)fvvVUENMk3DX20A3^!Bme*a diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks index 832a23d2dbf09ef4571049b617623200af3dd86d..ebe6146124e8fd607e46a1a3129bdf9b4de0370d 100644 GIT binary patch delta 2897 zcmZvec{CJy8^>pBEXfkazAHkR>>6v95XQ(DB>RlAjB2vWm=UsMo3c%tvMVK16XW6? z8e2y8C9Xt>5m!YKUT?Sao_pW>{_&jid(Lk;-}8Kb=XpNshS|1!JIg!E0002A7u>ac zFhSz}D_Fu6d{_Y18|x1M0R2O+tiXQEJVH&h*(ImlwQxwy z$W6fwUQs)BCVD&oE?l+c5gQ}*hT3GO6UHNaF~9dPqf1C*L;r53a$<=q6BbatrvV7q z=XOjNZn(Gl3?~Cu_JVRB3<82ckjv-g5pX^zw_36d+ZX}{?E}CSDFvb`G6rznigPhuKO9+$L4KP1uM zq*}NP6pWVPhrl2r4gi0~C#-^0Np@q>8;1r@F&^(RiUNp2{F`#H4FGNhk`KJB8G$p@ zz?@|zIoDO8$NQtdBlRa#@dI-NlgupzG)Mu^*N#G`4dGu)1~EnQz83}415TT`pM1{- za`RgA%P9{3tQp^b4ZTe8-7B5npl7@b0I&}XCqs7s^pyA)Q^}B}ya_*#wNZcDr4E2x z)phIm<<{t6C|+`|9qPXgI8|F46X)D7pNWf@5mnTf!lq7JAex(g^l2}=Bj6tDDd5At zaUzsIt+tF#1S0jM-a5{7QC?PvhlG915O1Y)Ww+;+;K!9u6v1?|KG>*P)?rc~+s6eOMBw6;%?QzX(m8+j#omUNw z+m$~aZ)E7?=IvWXto_ji6p(@tC9t)V`Bh@(PU3j}iP~evMeN_tbDIqLl#&tv5`_$P zD$8pXrzv!&LuA!sS)prP)7%>9$aD)(?3o_8P-n(dq5TyDwo-yls)wu@6=3fpO$t>vVn3) zCC+F;ae(O_Sjdo`RB9vMX^n=2kU4&xgn@d7sq}o8H^VYm(GbB-MbuEqzy`Cbgmp9_RuO{Is;Hb98zA^zH(RH_`H|fS_Faq59w*B0m zDXHp3gKHcY!m*?6OFeyOG;mfOgqAnb-N(+ppG6rpaV@-ck|93P4(oI3xYUtT4J1rIP=qN65Rm>G^qv% ztm|*(-$+MfxSdFSZ~HXfXje-6mKP1O8HO(`A=GDIOmS^+Dj%a8vzaP_Zu!+hq9aEo z9bZnC2e)gr5Y4C=JhD(;t5*{$r6#GTcawU3I^>lM$!2xA-L$gMJF@hd6PlCL9ae?P zkCb(eY9>Z?4wgxVqh}En=BEkRQ%~d zwtF}xBe}dm9Tz2KMi_<3(`L60fp&`*ufo5`H#X+uh*FukO*eX0*wQ`MDxY|dv0m>i z1XtHbg@mOSU7K0BjTHCM$jW;qK8RhsSjQ!;V6O4#+)2}X^ z+D$|rxF4d#{oEyMVg6eKw~`j9!+GoZ*+~s%kDh>pW@gS@L=i5v_r}47Jm~yoDQ%6< ztU7(sYA)jcbjyIB-O@iaHWIbhHM91HP5M73AQ>3>jtAiAGx)x6YO?`o8813tB+uvf z=CGISSM!~x@pEHuY|1}ab}e?9iFM1Bdp?fx^X$s4!nAnlN|l&FGxSDXCx-(w9yIS_ z*u?GY@$VdhhgIZfI!#mijidQ#DKU1}!T0dY_+r@Ayo91UQQGBr{UhvB*T)MJr)`ew z>zfu(%{irCL@N|7R1-qP?;5j#R4K)gJi8`0osaedW`5hePF%D}a7kpkJt2Gi_JN#$ z;w89qPL@NXXg5#dqPj$%LCRZpdeJQj4U>_AOkmFMI72LFn^H|TQPgmc(h7Z-F~ZvYc*B(+VQ#P zu9xFDP-VAm#CP2ZBo|Wowizd_!_p(m_~ynBs2Q+U6P*)g;{5p3C6CR-2Y#b+SNtjA z=RSu^MG7A+rQUaeE?rY;8$o23bF1Xzc=J}C^t31MbMmqnpTa*2iD87FFira#5rB>0 zWUv984Epkm2o#M7LGS}m9-m|zAQt?8kU&TKxWQkL;Ai>^3CKYDp4}RMVu1k!xR=O* zNF=~E@eD~O{NNGiDZAUpIJ5KXLUd=N6-z*?tZBjv)Zq(O=gbsQTkgg;+%}tEeQkiR zcT>?JBP2wEj851Z&W9dc=u}#A@7@9=R#1_NWK@pq&{;cQsS`YtRqwYJRwz8%OmblYr18fJ00`h&FvP6~kzkZfmPkyj;ZU3s9^wX>AgH!869QQMzEN4m$)0M?;h3=W!3A-!gKHsiTgJdbp8I=@{0i~BVL#M(NuxT zPOV6sn)di`51N~f^Gb+C9|{24BX)Xi)xe}{01zo5(jH%Lbr2XUc0pjWd~kZ7E+O2_ zno??Z3HNOvHshRqzFH?!9zvm(4Y^hHv@Kgc-R{pem8C9NOe>kEUB(T;+2op3_qQ6lzh0L_MCA ztHjWY{y|+9YX-~~Fsn_y%tzSXT0unF*xCSPT@5_DPje5+@%_*1OJhygSbb({5*pB2 z*wJ@rdy$!PMj)AgZ?%cS*FIBG$9n0%Ywh+?cO#r!06X-ZiGeI3&hq(!U5Pz#%x{jR zgdR!XCt&+NzV{-bN!jlGUx}N^1z1~he@d7!`y`OK11BLPf6dOh>?RO{cRn-EU!mC|wxG?s4{+CSo_7v?~0BJv$0$||%{RgsqprD)jB-uC0tesmce>sH1{B_;0 zx@PWGT)}4l-;mDul~E8@QyG7qLql9+$QyzxX$FS3Tf7qdhmZk2#(d=f&6`@oU9tZ~ znJ%q|RT%TNU_}5;6 Gw}2lTb%)F1E=E$AYv?<=+_X%#}W<|WF1$lK3k4SY%m@) z-)f1{|56u1)-a{oF#^68+!J9SSlIt%I1eFqqEhkteJ_VlDG zItsD$U@iRaI@l4$J}VMD#Q^u*Hehv4m<~s#usU+j z8*+Ffi$yaZ0^Ehqf9q82%eJg`Y^0x4tM^vy|2P@))*Xtjo0*DzZE<>>#lz(C1xf#( zd5+m;&1XdmU$@pF1Dy5+9R*k>__fc=hcc`36ryy?O}Y>;*R_>GY-X$CdX1(L(y=E^ zdqsIdjVOM|$xgVhDF-I{V%#JpYTu@H9K?IY{6*kz$POX0f4xpTT0IfS5yCu;2$U>m zixYzb!e*z`f|eh}AFy3`f1Gfvu}N@WM6JFk8S+{tdcl!l+gIGx@C$t+iW}dxF@2ZV z#(8f=^j$4JCRsmhNgE3=8moJcZ{A*}IL{@yw1~|I4+-!>#;Qa#IPF|&+esHK;;cq0 z;Ww#1|BD-l4(SfN%XHt<9!cF$TJ!a@$QQf^6zzA$e(cN7!8229b%VR4rA47A^3kq@ AEC2ui diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.p12 b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.p12 index 2ac8125b58d3113e25351f95999241ae2d4ef031..0e6bcaa4f8b7685d7f5b99df290a52949500367c 100644 GIT binary patch literal 2699 zcmY+EcRUn~AIHxvxg!ZDdnQ5-@f_K^jLyjF%3eh{GeRBCx{iku*?VT3QF0lF=%lQ; z&bU;PE##==@%z1gzvp>9e|*>H`}uzVd?RtNL?8AJl$c_2 zOs_(AcG)+k^y8jh>BG51{h)vU1VFvNWaR%(b%#g|>xYrGgpK)hsH zCt{3cF4Tu9dA@#Bh0t$}=mWoVCe(i*`dJb7Hz&E=p2SMdSDr8(BZ-5oZQ8e2V&hiQ5&_I+j`a=T08Q*Re8 z@!UG_@%dJqRaY@6WlZfEb-c2%p&@wA)yaKaCZ1r_FFC2y(4&8z!gFm(;d*c`%}MO* z-0QR-ujLkLiM3a?jy8V7>i>~b; zG>v$X&;bmA40q>l-Q+C2Rqh+8>D0g$uT1U{jtm${qpl(s@7N2EE{MxScYZwML^=C| z#}+2{r8r_J`tndT?|J*k1ws_DCR?rGl5$E&TUCJ%4bZf4JHCb0(Is2NHeVCfZ!_D9 z_hn(vw?FN`J^tkcKn`H-5TUQgZ(`sg{h4)hXTSTqBzZBUfuwP7tlb}U4rWf^=_53v zNrDs`a|QNu&t7Og@{+Bv4@`V7(!AO3Zu6i9dq1DNLK7V}^tI+PLQGBM33@}OIfP3U zkY!lvdFdTZ$ZXLmbb9+}Xv`I-Koe@Nd1AWWh0MLVwwAheIdj$mBJ$NVyuPQeNZtoi z^x9%M`1Gi2Z}4n8DcdUM_y|7pbFW+HKHlu=xE1*gd2`fK`vvP;f;WdcQ(it{Hg*wl zAJTLWM(!zGEc7&{^>UslCK-!3jv`m+Yjh1~^}gOd;_!%(aG>lO9FKNsK9(}2Wgg@m zFuN|S+c4HW{rqu*HK-(d%k^$V<6U<%HJ1g9vNIqd^kXkMXwOIEW|rmXm8b}t2W9cp zx?zHX$YYbZXvZ^tp?bdbUExS$HSCg`vkkt_j#&&dW2CGi6s*^Bgr(r<8QXxq-Nb%b z2h4FjHxKhgO*cY{n$z2R{A<%1?@)jXYSV-w=dM+(TGj0<+Y6hv1^e@lio>JZH-S=DrzWY zbrtnLjzC)Xw}};c#Mb;HDF?^`I4T+cY*_w9Tj+miyMlr*D`&Dfc^P{(?~WdDL-%2~Jg zoVceNZ(d&!(jLVVnuzCeLt$OxTp~sOyYB=x)F1k#FC|=ZZfSa1MpH&$)+F+<-VAfc zT-L1{VHNBj&mNBdR6aDgNoFJP{;wg&99h$6bP}?hy1p=gJ#-#3PwJ==fG1h} zxVO+r_i~7yzQ_1+z$u@|&_uPWPSTX~V5fHwajw!GP$tu>1O3W;Ds)tjz*~Q4F}$M% zSB>mmOCLy`&zMVTs_JY|YxD}g_N>1MF;6cB!l~shQ7wDlrdz+F$8KJ=k8i3xC-il% zjBn3U|Nex~48Or73KepF1W`wCk94~Y6X|6RTXRn#SGV4i4XglR%=7~&8oDrD zE1G|(&*Ga}8Sp@7OKi+&fc1&7RDDf*NPbvJ$EnvNz(RrqkV$+h=U%jtxnnr-Y>7Hqx0N^gM|gX+ zc1vR`s5ML4htZ|=6ah}qwdW&N>3W+&C(D_w{`Du%UFAg@Pd~Q% znR3ct@UsD}uq)@oGUVYYey9>>*(DA6x;Zn|>n;SoJi>v`IPDtLmzQA13pO}q#JD7o zuSHXR`zdH{k%KF;ni}xIFd*#h3F)zmZS!vZBwJT6$Bq}O1T|TGNc1K)iqIphB+yqj%};ywFHUgj%xf*n&uc1fXow^90K@8mRTYL| zIrnNnKChAr)jnI)8kx<65@sU!C0+>W*AIW~NRM^KFRy%5H1-U4zf2#=)TiY2UXmBQ zA$A=6j?+%}q^G7f*z!xcRL2d$7u!P3SCHCBaU>fQB6l1F6o3H0JcWT7PnvxAQJh;Z o(V~Hq)$ literal 2654 zcmY+^cRU-476A7pI;OaYD)*CM-ic-U?$l({kRiWAR~}O zgmQw2P}Yl>2}J}I{<8v;h@iBKD1{CPxEQhjY=9^P82axIEI=@d4Fth?eb^ZL!y%oH z4gw$$LDq`jJk_1T5QaMGtd=(u2#y|^LmKzTTT`4}!ar^_diUL%7tc?oK0&4`B>VFF zJ|XvHFe~Ep4=1CkRh2t>)Gx0;O~-SD;HU3ch-k`P>93>)TS|O7sc{yZ+)vnGP%yB$ zdgEz*Erd;I`Qc&-7+X%a@c&6r<45b+nhcid1hqnc{5Lh+&GI79dwo zI;WqWkQ2_GiWV5ju(3|>xj2_ltu0%RZ86P~h+UMKoVvR2$SHUCo0i}>GXlotNfg0k zrCOvuc9A(6 z&N1xskA;ys-x{6hM1y7(&{EXP4CQt_&y6z9a~kqesUE7u$R&F}27FMsx!dg$LJLw2 zko=YHmn{~tW!&3FsHfg*J#APlr8R0Bh53_6+EVyFK9MJs>@Oro`q$TetSoh7D2uu+ zqS+IN(>Lx=xxN~@)6?e7@D@?|Ef9-3lz+phPs1NSHrhm4k?QhbOnQ4JQ#1QV@j4;( zwR<`hwkC`1`>G#`EI^FEy{PS%q%m&HgoXX`^o1PEeKc^#p(*Jn<48$Eg=1VEBkV+@B$M zUVQsYo2^MIx}M?!ed-=IS(GHPeMRE&uF;|PjHe%MWbh@&*}bb~(nC9(dOyB#TWvm* ze$>@erzGsAjKH9LBmKv!y9nnX!BJ|Uw3|am7gg+taAO2()Hkc=w5b)9{)|(fmHA6Q z4}P_hMk{{TdacXTrlnPqEiyr#;!ZmCFP3YrO^1!cEHwFtg&Rs0+X~K#ZzB%Gc58X2&-r$zR9i|)S&;x;2RVHb}@l*;V zzIj*P8zask)7nXZyB=58T9nX>s`PhEAhN=42gg?{v0h3Ej{a_j7J-jN@11#GvwEiE zMrT*c;$xBfr{BG?t#wD;K!zK&Id*fYcb}Sw8vIo62CTQ+oH%kl3|TEsZyzmQBZdWD zD|IYf<_OJACtWe$R%od7&OA<0m1*{JLJ=99{s$rwk--8)WH7mi4KGR$%={mgpg_6{ zzxFQR+WtSS=>OL06?~eK2`*m!TPqL|)K_TLiH(+JYGojaKd>|Ya6~n{-nHp05f+`O zz?4J;8jJvKK58ev`M%`eUSKz=Jr1#C4*G%Zc+$m}*feG%V-w>gG+)2JO`$(iN@AYN zTVt+v!K!LiN(W00=z1jFl9&$QwT2qUI&M6}gyE(%IGI~d`ci+s6{`wnRr9_X#JE*F z5xRM-B|M<+>5}aJF26;uO3EHK*{UAU0G@@V?iOMdDG2YM9KC~%07Y+I@tv02{5V0^ z{?Uouxr%{`mI-ZEX@XSq0PBpr$<~{r0oQ#lu8j=oPR9f+0f`Ivu4Ed9jPU@bM<4B6ghJce4qy?mSXPry60#5|ZMZ))&rv;4p@)XbAb4 zQKEFm#d>}JmV6Vpp0z<4q;%c^ z-@5Wk_Uq8CS@Rm_C9gECfH8sY5E0(TahVz;dI8eBqL@fU2gR&`NK}i@HI0Qwy7pTL zwPfhQBH`h_XE^2jbI8KNUJsxd&=>t{w~iB+`k!t2xjk?0^vdVwOFM)H?~Ydc6c?DQbco@TwH-ZOv+dVP@;9Q%$T_%>)(5h3w4*@`j=MT?I9c5>mClbetQEHav7t^J^YTAM7k#&kB|+ zblnt}*ZdUKXuy}it+KSqZK7cri7=p+M=~zKQgpvcb-qZ?ex_lv4qO(r4~e!zlwh}@ZhQh`h^d42!i#%$nEE9zAV;jC$dUNTpxF2H$2#S zmYj4jcC=ued>^!XEoZ^s6kDdvNbpq6@0$kbsOBhj8YS`i*UJ6@#H)<3`!vk4&zzMq zFDbJ$n%Y(d;l3RyigC6Gw`MmEm%{_vdIR1OB{{81Z)Wz2x86RB(3atqraxSXxv>t< z`ELLDc~VKek_@Ry?DX!fTPtmvm%VlG?xc9u-3Mn^Rv8Hxb9{p8d|hh=vL(aH5-RFr zvMh#FNNC7X?}}FclB;sqS}B?^;-de=ez}5+Xf>cE>awFp$07E3p}&%=+pHAt!OEeD z(Qo`|PhV+8<3`v;T?*%Z?q!BQrtxL>DPSqp%b&4j2aS%R5WUw);&pru*;6@ZF*6Hj zU6jawb!6f}a|v?*f&i|7z>Dq+@B_F2Tv5n>4q#|Fh(}-ws&R0@(dfSdY)s7Mr?+E+ z#iP(DF%%P+L5_`{PJjUb;%PkiJv;_~SIQ&Cv_`mEBE2JL&H)5D>1{%}NTz)Lo6)}j D&}!D= diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt new file mode 100644 index 00000000000..f4c9a6a7aac --- /dev/null +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode_ec.crt @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIIB7zCCAZOgAwIBAgIEcmggOzAMBggqhkjOPQQDAgUAMCIxIDAeBgNVBAMTF0Vs +YXN0aWNzZWFyY2ggVGVzdCBOb2RlMB4XDTE4MDUxNzA5MzYxMFoXDTQ1MTAwMjA5 +MzYxMFowIjEgMB4GA1UEAxMXRWxhc3RpY3NlYXJjaCBUZXN0IE5vZGUwWTATBgcq +hkjOPQIBBggqhkjOPQMBBwNCAATuZRlXGn/ROcO7yFJJ50b20YvgV3U+FpRx0nx/ +yigWj6xiEMKnWbbUnM0mKF8c3GHGk5g8OXPnbK96uj6tpMB5o4G0MIGxMB0GA1Ud +DgQWBBRNAGO77mUhG6SQvIXQTbpcFwlf2TCBjwYDVR0RBIGHMIGEgglsb2NhbGhv +c3SCFWxvY2FsaG9zdC5sb2NhbGRvbWFpboIKbG9jYWxob3N0NIIXbG9jYWxob3N0 +NC5sb2NhbGRvbWFpbjSCCmxvY2FsaG9zdDaCF2xvY2FsaG9zdDYubG9jYWxkb21h +aW42hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMAwGCCqGSM49BAMCBQADSAAwRQIg +Z3IvdmY5LFdbxoVSs6pV2tJ5+U833Chu0+ZzPo77IVUCIQDRx1FVitVuzBpqwhSW ++Zprt2RLPllC4s4BCApGDh8i1g== +-----END CERTIFICATE----- diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java index bdaa2377fd1..d93657acdc0 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookHttpsIntegrationTests.java @@ -5,12 +5,15 @@ */ package org.elasticsearch.xpack.watcher.actions.webhook; +import com.sun.net.httpserver.HttpsServer; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ssl.TestsSSLService; import org.elasticsearch.xpack.core.watcher.history.WatchRecord; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; @@ -26,6 +29,10 @@ import org.junit.After; import org.junit.Before; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Collections; +import java.util.List; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.xpack.watcher.client.WatchSourceBuilders.watchBuilder; @@ -51,6 +58,7 @@ public class WebhookHttpsIntegrationTests extends AbstractWatcherIntegrationTest .put("xpack.http.ssl.key", keyPath) .put("xpack.http.ssl.certificate", certPath) .put("xpack.http.ssl.keystore.password", "testnode") + .putList("xpack.http.ssl.supported_protocols", getProtocols()) .build(); } @@ -131,4 +139,23 @@ public class WebhookHttpsIntegrationTests extends AbstractWatcherIntegrationTest assertThat(webServer.requests().get(0).getBody(), equalTo("{key=value}")); assertThat(webServer.requests().get(0).getHeader("Authorization"), equalTo("Basic X3VzZXJuYW1lOl9wYXNzd29yZA==")); } + + /** + * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to + * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK + */ + private static List getProtocols() { + if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } else if (JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0) { + return Collections.singletonList("TLSv1.2"); + } else { + JavaVersion full = + AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); + if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { + return Collections.singletonList("TLSv1.2"); + } + } + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java index 6bb607d6805..3ae96499b6a 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.watcher.common.http; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import com.sun.net.httpserver.HttpsServer; import org.apache.http.HttpHeaders; import org.apache.http.client.ClientProtocolException; import org.apache.http.client.config.RequestConfig; @@ -13,6 +14,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.MockSecureSettings; @@ -28,6 +30,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.http.MockResponse; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.test.junit.annotations.Network; +import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.ssl.TestsSSLService; import org.elasticsearch.xpack.core.ssl.VerificationMode; @@ -44,9 +47,12 @@ import java.net.Socket; import java.net.SocketTimeoutException; import java.nio.charset.StandardCharsets; import java.nio.file.Path; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Locale; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -190,6 +196,7 @@ public class HttpClientTests extends ESTestCase { Settings settings2 = Settings.builder() .put("xpack.security.http.ssl.key", keyPath) .put("xpack.security.http.ssl.certificate", certPath) + .putList("xpack.security.http.ssl.supported_protocols", getProtocols()) .setSecureSettings(secureSettings) .build(); @@ -218,6 +225,7 @@ public class HttpClientTests extends ESTestCase { Settings settings2 = Settings.builder() .put("xpack.security.http.ssl.key", keyPath) .put("xpack.security.http.ssl.certificate", certPath) + .putList("xpack.security.http.ssl.supported_protocols", getProtocols()) .setSecureSettings(secureSettings) .build(); @@ -234,6 +242,7 @@ public class HttpClientTests extends ESTestCase { Settings settings = Settings.builder() .put("xpack.http.ssl.key", keyPath) .put("xpack.http.ssl.certificate", certPath) + .putList("xpack.http.ssl.supported_protocols", getProtocols()) .setSecureSettings(secureSettings) .build(); @@ -370,6 +379,7 @@ public class HttpClientTests extends ESTestCase { Settings serverSettings = Settings.builder() .put("xpack.http.ssl.key", keyPath) .put("xpack.http.ssl.certificate", certPath) + .putList("xpack.http.ssl.supported_protocols", getProtocols()) .setSecureSettings(serverSecureSettings) .build(); TestsSSLService sslService = new TestsSSLService(serverSettings, environment); @@ -383,6 +393,7 @@ public class HttpClientTests extends ESTestCase { .put(HttpSettings.PROXY_PORT.getKey(), proxyServer.getPort()) .put(HttpSettings.PROXY_SCHEME.getKey(), "https") .put("xpack.http.ssl.certificate_authorities", trustedCertPath) + .putList("xpack.http.ssl.supported_protocols", getProtocols()) .build(); HttpRequest.Builder requestBuilder = HttpRequest.builder("localhost", webServer.getPort()) @@ -737,4 +748,23 @@ public class HttpClientTests extends ESTestCase { private String getWebserverUri() { return String.format(Locale.ROOT, "http://%s:%s", webServer.getHostName(), webServer.getPort()); } + + /** + * The {@link HttpsServer} in the JDK has issues with TLSv1.3 when running in a JDK prior to + * 12.0.1 so we pin to TLSv1.2 when running on an earlier JDK + */ + private static List getProtocols() { + if (JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0) { + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } else if (JavaVersion.current().compareTo(JavaVersion.parse("12")) < 0) { + return Collections.singletonList("TLSv1.2"); + } else { + JavaVersion full = + AccessController.doPrivileged((PrivilegedAction) () -> JavaVersion.parse(System.getProperty("java.version"))); + if (full.compareTo(JavaVersion.parse("12.0.1")) < 0) { + return Collections.singletonList("TLSv1.2"); + } + } + return XPackSettings.DEFAULT_SUPPORTED_PROTOCOLS; + } } diff --git a/x-pack/qa/reindex-tests-with-security/build.gradle b/x-pack/qa/reindex-tests-with-security/build.gradle index 64e1c61b607..7cbdfae5ed4 100644 --- a/x-pack/qa/reindex-tests-with-security/build.gradle +++ b/x-pack/qa/reindex-tests-with-security/build.gradle @@ -36,6 +36,12 @@ integTestCluster { setting 'xpack.security.http.ssl.key_passphrase', 'http-password' setting 'reindex.ssl.truststore.path', 'ca.p12' setting 'reindex.ssl.truststore.password', 'password' + + // Workaround for JDK-8212885 + if (project.ext.runtimeJavaVersion.isJava12Compatible() == false) { + setting 'reindex.ssl.supported_protocols', 'TLSv1.2' + } + extraConfigFile 'roles.yml', 'roles.yml' [ test_admin: 'superuser', From 072a9bdf55b1114c59bdd026025f5f71e6665f7f Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 20 May 2019 09:46:37 -0400 Subject: [PATCH 04/25] Fix FiltersAggregation NPE when `filters` is empty (#41459) If `keyedFilters` is null it assumes there are unkeyed filters...which will NPE if the unkeyed filters was actually empty. This refactors to simplify the filter assignment a bit, adds an empty check and tidies up some formatting. --- .../search.aggregation/220_filters_bucket.yml | 12 ++++ .../filter/FiltersAggregationBuilder.java | 61 ++++++++++--------- .../aggregations/bucket/FiltersTests.java | 38 +++++++++++- 3 files changed, 78 insertions(+), 33 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml index a6b7cae1044..e0183f0c54f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/220_filters_bucket.yml @@ -251,8 +251,20 @@ setup: --- "Bad params": + - skip: + version: " - 7.1.99" + reason: "empty bodies throws exception starting in 7.2" + - do: + catch: /\[filters\] cannot be empty/ + search: + rest_total_hits_as_int: true + body: + aggs: + the_filter: + filters: {} - do: + catch: /\[filters\] cannot be empty/ search: rest_total_hits_as_int: true body: diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index 810126e8512..54dfc301b2d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.List; import java.util.Map; import java.util.Objects; @@ -47,7 +48,7 @@ import java.util.Objects; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; public class FiltersAggregationBuilder extends AbstractAggregationBuilder - implements MultiBucketAggregationBuilder { + implements MultiBucketAggregationBuilder { public static final String NAME = "filters"; private static final ParseField FILTERS_FIELD = new ParseField("filters"); @@ -74,7 +75,7 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder(filters); if (keyed) { // internally we want to have a fixed order of filters, regardless of the order of the filters in the request - Collections.sort(this.filters, (KeyedFilter kf1, KeyedFilter kf2) -> kf1.key().compareTo(kf2.key())); + this.filters.sort(Comparator.comparing(KeyedFilter::key)); this.keyed = true; } else { this.keyed = false; @@ -220,9 +221,9 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder doBuild(SearchContext context, AggregatorFactory parent, Builder subFactoriesBuilder) - throws IOException { + throws IOException { return new FiltersAggregatorFactory(name, filters, keyed, otherBucket, otherBucketKey, context, parent, - subFactoriesBuilder, metaData); + subFactoriesBuilder, metaData); } @Override @@ -248,15 +249,15 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder keyedFilters = null; - List nonKeyedFilters = null; + List filters = new ArrayList<>(); - XContentParser.Token token = null; + XContentParser.Token token; String currentFieldName = null; String otherBucketKey = null; Boolean otherBucket = null; + boolean keyed = false; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -265,61 +266,61 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder(); String key = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { key = parser.currentName(); } else { QueryBuilder filter = parseInnerQueryBuilder(parser); - keyedFilters.add(new FiltersAggregator.KeyedFilter(key, filter)); + filters.add(new FiltersAggregator.KeyedFilter(key, filter)); } } + keyed = true; } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { if (FILTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - nonKeyedFilters = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + List builders = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { QueryBuilder filter = parseInnerQueryBuilder(parser); - nonKeyedFilters.add(filter); + builders.add(filter); + } + for (int i = 0; i < builders.size(); i++) { + filters.add(new KeyedFilter(String.valueOf(i), builders.get(i))); } } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } else { throw new ParsingException(parser.getTokenLocation(), - "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); } } + if (filters.isEmpty()) { + throw new IllegalArgumentException("[" + FILTERS_FIELD + "] cannot be empty."); + } + + FiltersAggregationBuilder factory = new FiltersAggregationBuilder(aggregationName, filters, keyed); + if (otherBucket == null && otherBucketKey != null) { // automatically enable the other bucket if a key is set, as per the doc otherBucket = true; } - - FiltersAggregationBuilder factory; - if (keyedFilters != null) { - factory = new FiltersAggregationBuilder(aggregationName, - keyedFilters.toArray(new FiltersAggregator.KeyedFilter[keyedFilters.size()])); - } else { - factory = new FiltersAggregationBuilder(aggregationName, - nonKeyedFilters.toArray(new QueryBuilder[nonKeyedFilters.size()])); - } if (otherBucket != null) { factory.otherBucket(otherBucket); } @@ -338,9 +339,9 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder FiltersAggregationBuilder.parse("agg_name", parser)); + assertThat(e.getMessage(), equalTo("[filters] cannot be empty.")); + } + + { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder.startObject(); + builder.startObject("filters").endObject(); // keyed object + builder.endObject(); + XContentParser parser = createParser(shuffleXContent(builder)); + parser.nextToken(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> FiltersAggregationBuilder.parse("agg_name", parser)); + assertThat(e.getMessage(), equalTo("[filters] cannot be empty.")); + } + } } From b4a413c4d0c03f5beca7757c8544f84e61fd85d8 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 20 May 2019 17:55:29 +0300 Subject: [PATCH 05/25] Hash token values for storage (#41792) (#42220) This commit changes how access tokens and refresh tokens are stored in the tokens index. Access token values are now hashed before being stored in the id field of the `user_token` and before becoming part of the token document id. Refresh token values are hashed before being stored in the token field of the `refresh_token`. The tokens are hashed without a salt value since these are v4 UUID values that have enough entropy themselves. Both rainbow table attacks and offline brute force attacks are impractical. As a side effect of this change and in order to support multiple concurrent refreshes as introduced in #39631, upon refreshing an pair, the superseding access token and refresh tokens values are stored in the superseded token doc, encrypted with a key that is derived from the superseded refresh token. As such, subsequent requests to refresh the same token in the predefined time window will return the same superseding access token and refresh token values, without hitting the tokens index (as this only stores hashes of the token values). AES in GCM mode is used for encrypting the token values and the key derivation from the superseded refresh token uses a small number of iterations as it needs to be quick. For backwards compatibility reasons, the new behavior is only enabled when all nodes in a cluster are in the required version so that old nodes can cope with the token values in a mixed cluster during a rolling upgrade. --- .../core/security/authc/support/Hasher.java | 18 + .../resources/security-index-template-7.json | 15 +- .../security-tokens-index-template-7.json | 15 +- ...nsportOpenIdConnectAuthenticateAction.java | 6 +- .../saml/TransportSamlAuthenticateAction.java | 3 +- .../token/TransportCreateTokenAction.java | 3 +- .../token/TransportRefreshTokenAction.java | 4 +- .../xpack/security/authc/TokenService.java | 510 +++++++++++------- .../xpack/security/authc/UserToken.java | 2 +- ...ansportOpenIdConnectLogoutActionTests.java | 19 +- ...sportSamlInvalidateSessionActionTests.java | 33 +- .../saml/TransportSamlLogoutActionTests.java | 22 +- .../authc/AuthenticationServiceTests.java | 20 +- .../security/authc/TokenServiceTests.java | 325 ++++++----- .../security/authc/support/HasherTests.java | 4 + 15 files changed, 627 insertions(+), 372 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java index 492622b2c51..28f26374813 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/Hasher.java @@ -351,6 +351,24 @@ public enum Hasher { return CharArrays.constantTimeEquals(computedHash, new String(saltAndHash, 12, saltAndHash.length - 12)); } }, + /* + * Unsalted SHA-256 , not suited for password storage. + */ + SHA256() { + @Override + public char[] hash(SecureString text) { + MessageDigest md = MessageDigests.sha256(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + return Base64.getEncoder().encodeToString(md.digest()).toCharArray(); + } + + @Override + public boolean verify(SecureString text, char[] hash) { + MessageDigest md = MessageDigests.sha256(); + md.update(CharArrays.toUtf8Bytes(text.getChars())); + return CharArrays.constantTimeEquals(Base64.getEncoder().encodeToString(md.digest()).toCharArray(), hash); + } + }, NOOP() { @Override diff --git a/x-pack/plugin/core/src/main/resources/security-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-index-template-7.json index ebf6d073cd8..dae6462b7a6 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template-7.json @@ -213,8 +213,19 @@ "type": "date", "format": "epoch_millis" }, - "superseded_by": { - "type": "keyword" + "superseding": { + "type": "object", + "properties": { + "encrypted_tokens": { + "type": "binary" + }, + "encryption_iv": { + "type": "binary" + }, + "encryption_salt": { + "type": "binary" + } + } }, "invalidated" : { "type" : "boolean" diff --git a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json index e7450d0be9c..312d9ff9e3f 100644 --- a/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json +++ b/x-pack/plugin/core/src/main/resources/security-tokens-index-template-7.json @@ -35,8 +35,19 @@ "type": "date", "format": "epoch_millis" }, - "superseded_by": { - "type": "keyword" + "superseding": { + "type": "object", + "properties": { + "encrypted_tokens": { + "type": "binary" + }, + "encryption_iv": { + "type": "binary" + }, + "encryption_salt": { + "type": "binary" + } + } }, "invalidated" : { "type" : "boolean" diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java index 1b4aff064a0..4bab16cf921 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectAuthenticateAction.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.security.action.oidc; import com.nimbusds.oauth2.sdk.id.State; import com.nimbusds.openid.connect.sdk.Nonce; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -36,6 +38,7 @@ public class TransportOpenIdConnectAuthenticateAction private final ThreadPool threadPool; private final AuthenticationService authenticationService; private final TokenService tokenService; + private static final Logger logger = LogManager.getLogger(TransportOpenIdConnectAuthenticateAction.class); @Inject public TransportOpenIdConnectAuthenticateAction(ThreadPool threadPool, TransportService transportService, @@ -67,9 +70,8 @@ public class TransportOpenIdConnectAuthenticateAction .get(OpenIdConnectRealm.CONTEXT_TOKEN_DATA); tokenService.createOAuth2Tokens(authentication, originatingAuthentication, tokenMetadata, true, ActionListener.wrap(tuple -> { - final String tokenString = tokenService.getAccessTokenAsString(tuple.v1()); final TimeValue expiresIn = tokenService.getExpirationDelay(); - listener.onResponse(new OpenIdConnectAuthenticateResponse(authentication.getUser().principal(), tokenString, + listener.onResponse(new OpenIdConnectAuthenticateResponse(authentication.getUser().principal(), tuple.v1(), tuple.v2(), expiresIn)); }, listener::onFailure)); }, e -> { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java index 6b61742eed2..96eec7e8fd6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlAuthenticateAction.java @@ -63,10 +63,9 @@ public final class TransportSamlAuthenticateAction extends HandledTransportActio final Map tokenMeta = (Map) result.getMetadata().get(SamlRealm.CONTEXT_TOKEN_DATA); tokenService.createOAuth2Tokens(authentication, originatingAuthentication, tokenMeta, true, ActionListener.wrap(tuple -> { - final String tokenString = tokenService.getAccessTokenAsString(tuple.v1()); final TimeValue expiresIn = tokenService.getExpirationDelay(); listener.onResponse( - new SamlAuthenticateResponse(authentication.getUser().principal(), tokenString, tuple.v2(), expiresIn)); + new SamlAuthenticateResponse(authentication.getUser().principal(), tuple.v1(), tuple.v2(), expiresIn)); }, listener::onFailure)); }, e -> { logger.debug(() -> new ParameterizedMessage("SamlToken [{}] could not be authenticated", saml), e); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java index 4b648d5ed4b..65456ccd2af 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportCreateTokenAction.java @@ -88,9 +88,8 @@ public final class TransportCreateTokenAction extends HandledTransportAction listener) { tokenService.createOAuth2Tokens(authentication, originatingAuth, Collections.emptyMap(), includeRefreshToken, ActionListener.wrap(tuple -> { - final String tokenStr = tokenService.getAccessTokenAsString(tuple.v1()); final String scope = getResponseScopeValue(request.getScope()); - final CreateTokenResponse response = new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, + final CreateTokenResponse response = new CreateTokenResponse(tuple.v1(), tokenService.getExpirationDelay(), scope, tuple.v2()); listener.onResponse(response); }, listener::onFailure)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java index 71aeb64bc42..5c161d889cf 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/token/TransportRefreshTokenAction.java @@ -31,11 +31,9 @@ public class TransportRefreshTokenAction extends HandledTransportAction listener) { tokenService.refreshToken(request.getRefreshToken(), ActionListener.wrap(tuple -> { - final String tokenStr = tokenService.getAccessTokenAsString(tuple.v1()); final String scope = getResponseScopeValue(request.getScope()); - final CreateTokenResponse response = - new CreateTokenResponse(tokenStr, tokenService.getExpirationDelay(), scope, tuple.v2()); + new CreateTokenResponse(tuple.v1(), tokenService.getExpirationDelay(), scope, tuple.v2()); listener.onResponse(response); }, listener::onFailure)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 50e98cd0ca6..8d4482a6d58 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -86,6 +86,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.Authentication.AuthenticationType; import org.elasticsearch.xpack.core.security.authc.KeyAndTimestamp; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; +import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.TokensInvalidationResult; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -157,11 +158,12 @@ public final class TokenService { * Cheat Sheet and the * NIST Digital Identity Guidelines */ - private static final int ITERATIONS = 100000; + static final int TOKEN_SERVICE_KEY_ITERATIONS = 100000; + static final int TOKENS_ENCRYPTION_KEY_ITERATIONS = 1024; private static final String KDF_ALGORITHM = "PBKDF2withHMACSHA512"; - private static final int SALT_BYTES = 32; + static final int SALT_BYTES = 32; private static final int KEY_BYTES = 64; - private static final int IV_BYTES = 12; + static final int IV_BYTES = 12; private static final int VERSION_BYTES = 4; private static final String ENCRYPTION_CIPHER = "AES/GCM/NoPadding"; private static final String EXPIRED_TOKEN_WWW_AUTH_VALUE = "Bearer realm=\"" + XPackField.SECURITY + @@ -179,14 +181,18 @@ public final class TokenService { TimeValue.MINUS_ONE, Property.NodeScope); static final String TOKEN_DOC_TYPE = "token"; + private static final int HASHED_TOKEN_LENGTH = 44; + // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars + private static final int TOKEN_LENGTH = 22; private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_"; - static final int MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; + static final int LEGACY_MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1; + static final int MINIMUM_BYTES = VERSION_BYTES + TOKEN_LENGTH + 1; + static final int LEGACY_MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * LEGACY_MINIMUM_BYTES) / 3)).intValue(); static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); + static final Version VERSION_HASHED_TOKENS = Version.V_7_2_0; static final Version VERSION_TOKENS_INDEX_INTRODUCED = Version.V_7_2_0; static final Version VERSION_ACCESS_TOKENS_AS_UUIDS = Version.V_7_2_0; static final Version VERSION_MULTIPLE_CONCURRENT_REFRESHES = Version.V_7_2_0; - // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars - private static final int TOKEN_ID_LENGTH = 22; private static final Logger logger = LogManager.getLogger(TokenService.class); private final SecureRandom secureRandom = new SecureRandom(); @@ -235,31 +241,71 @@ public final class TokenService { } /** - * Creates an access token and optionally a refresh token as well, based on the provided authentication and metadata with an - * auto-generated token document id. The created tokens are stored in the security index. + * Creates an access token and optionally a refresh token as well, based on the provided authentication and metadata with + * auto-generated values. The created tokens are stored in the security index for versions up to + * {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a specific security tokens index for later versions. */ - public void createOAuth2Tokens(Authentication authentication, Authentication originatingClientAuth, - Map metadata, boolean includeRefreshToken, - ActionListener> listener) { + public void createOAuth2Tokens(Authentication authentication, Authentication originatingClientAuth, Map metadata, + boolean includeRefreshToken, ActionListener> listener) { // the created token is compatible with the oldest node version in the cluster final Version tokenVersion = getTokenVersionCompatibility(); // tokens moved to a separate index in newer versions final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); // the id of the created tokens ought be unguessable - final String userTokenId = UUIDs.randomBase64UUID(); - createOAuth2Tokens(userTokenId, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, includeRefreshToken, - listener); + final String accessToken = UUIDs.randomBase64UUID(); + final String refreshToken = includeRefreshToken ? UUIDs.randomBase64UUID() : null; + createOAuth2Tokens(accessToken, refreshToken, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, listener); } /** - * Create an access token and optionally a refresh token as well, based on the provided authentication and metadata, with the given - * token document id. The created tokens are be stored in the security index. + * Creates an access token and optionally a refresh token as well from predefined values, based on the provided authentication and + * metadata. The created tokens are stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED} and to a + * specific security tokens index for later versions. */ - private void createOAuth2Tokens(String userTokenId, Version tokenVersion, SecurityIndexManager tokensIndex, + //public for testing + public void createOAuth2Tokens(String accessToken, String refreshToken, Authentication authentication, + Authentication originatingClientAuth, + Map metadata, ActionListener> listener) { + // the created token is compatible with the oldest node version in the cluster + final Version tokenVersion = getTokenVersionCompatibility(); + // tokens moved to a separate index in newer versions + final SecurityIndexManager tokensIndex = getTokensIndexForVersion(tokenVersion); + createOAuth2Tokens(accessToken, refreshToken, tokenVersion, tokensIndex, authentication, originatingClientAuth, metadata, listener); + } + + /** + * Create an access token and optionally a refresh token as well from predefined values, based on the provided authentication and + * metadata. + * + * @param accessToken The predefined seed value for the access token. This will then be + *
    + *
  • Encrypted before stored for versions before {@link #VERSION_TOKENS_INDEX_INTRODUCED}
  • + *
  • Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}
  • + *
  • Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}
  • + *
  • Stored in a specific security tokens index for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}
  • + *
  • Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs
  • + *
+ * @param refreshToken The predefined seed value for the access token. This will then be + *
    + *
  • Hashed before stored for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}
  • + *
  • Stored in the security index for versions up to {@link #VERSION_TOKENS_INDEX_INTRODUCED}
  • + *
  • Stored in a specific security tokens index for versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}
  • + *
  • Prepended with a version ID and encoded with Base64 before returned to the caller of the APIs for + * versions after {@link #VERSION_TOKENS_INDEX_INTRODUCED}
  • + *
+ * @param tokenVersion The version of the nodes with which these tokens will be compatible. + * @param tokensIndex The security tokens index + * @param authentication The authentication object representing the user for which the tokens are created + * @param originatingClientAuth The authentication object representing the client that called the related API + * @param metadata A map with metadata to be stored in the token document + * @param listener The listener to call upon completion with a {@link Tuple} containing the + * serialized access token and serialized refresh token as these will be returned to the client + */ + private void createOAuth2Tokens(String accessToken, String refreshToken, Version tokenVersion, SecurityIndexManager tokensIndex, Authentication authentication, Authentication originatingClientAuth, Map metadata, - boolean includeRefreshToken, ActionListener> listener) { - assert userTokenId.length() == TOKEN_ID_LENGTH : "We assume token ids have a fixed length for nodes of a certain version." - + " When changing the token length, be careful that the inferences about its length still hold."; + ActionListener> listener) { + assert accessToken.length() == TOKEN_LENGTH : "We assume token ids have a fixed length for nodes of a certain version." + + " When changing the token length, be careful that the inferences about its length still hold."; ensureEnabled(); if (authentication == null) { listener.onFailure(traceLog("create token", new IllegalArgumentException("authentication must be provided"))); @@ -269,10 +315,19 @@ public final class TokenService { } else { final Authentication tokenAuth = new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), authentication.getLookedUpBy(), tokenVersion, AuthenticationType.TOKEN, authentication.getMetadata()); - final UserToken userToken = new UserToken(userTokenId, tokenVersion, tokenAuth, getExpirationTime(), metadata); - final String plainRefreshToken = includeRefreshToken ? UUIDs.randomBase64UUID() : null; - final BytesReference tokenDocument = createTokenDocument(userToken, plainRefreshToken, originatingClientAuth); - final String documentId = getTokenDocumentId(userToken); + final String storedAccessToken; + final String storedRefreshToken; + if (tokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { + storedAccessToken = hashTokenString(accessToken); + storedRefreshToken = (null == refreshToken) ? null : hashTokenString(refreshToken); + } else { + storedAccessToken = accessToken; + storedRefreshToken = refreshToken; + } + final UserToken userToken = new UserToken(storedAccessToken, tokenVersion, tokenAuth, getExpirationTime(), metadata); + final BytesReference tokenDocument = createTokenDocument(userToken, storedRefreshToken, originatingClientAuth); + final String documentId = getTokenDocumentId(storedAccessToken); + final IndexRequest indexTokenRequest = client.prepareIndex(tokensIndex.aliasName(), SINGLE_MAPPING_NAME, documentId) .setOpType(OpType.CREATE) .setSource(tokenDocument, XContentType.JSON) @@ -283,15 +338,17 @@ public final class TokenService { () -> executeAsyncWithOrigin(client, SECURITY_ORIGIN, IndexAction.INSTANCE, indexTokenRequest, ActionListener.wrap(indexResponse -> { if (indexResponse.getResult() == Result.CREATED) { + final String versionedAccessToken = prependVersionAndEncodeAccessToken(tokenVersion, accessToken); if (tokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - final String versionedRefreshToken = plainRefreshToken != null - ? prependVersionAndEncode(tokenVersion, plainRefreshToken) - : null; - listener.onResponse(new Tuple<>(userToken, versionedRefreshToken)); + final String versionedRefreshToken = refreshToken != null + ? prependVersionAndEncodeRefreshToken(tokenVersion, refreshToken) + : null; + listener.onResponse(new Tuple<>(versionedAccessToken, versionedRefreshToken)); } else { - // prior versions are not version-prepended, as nodes on those versions don't expect it. + // prior versions of the refresh token are not version-prepended, as nodes on those + // versions don't expect it. // Such nodes might exist in a mixed cluster during a rolling upgrade. - listener.onResponse(new Tuple<>(userToken, plainRefreshToken)); + listener.onResponse(new Tuple<>(versionedAccessToken, refreshToken)); } } else { listener.onFailure(traceLog("create token", @@ -301,6 +358,15 @@ public final class TokenService { } } + /** + * Hashes an access or refresh token String so that it can safely be persisted in the index. We don't salt + * the values as these are v4 UUIDs that have enough entropy by themselves. + */ + // public for testing + public static String hashTokenString(String accessTokenString) { + return new String(Hasher.SHA256.hash(new SecureString(accessTokenString.toCharArray()))); + } + /** * Looks in the context to see if the request provided a header with a user token and if so the * token is validated, which might include authenticated decryption and verification that the token @@ -406,13 +472,24 @@ public final class TokenService { final Version version = Version.readVersion(in); in.setVersion(version); if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { - // The token was created in a > VERSION_ACCESS_TOKENS_UUIDS cluster so it contains the tokenId as a String - String usedTokenId = in.readString(); - getUserTokenFromId(usedTokenId, version, listener); + // The token was created in a > VERSION_ACCESS_TOKENS_UUIDS cluster + if (in.available() < MINIMUM_BYTES) { + logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BYTES); + listener.onResponse(null); + return; + } + final String accessToken = in.readString(); + // TODO Remove this conditional after backporting to 7.x + if (version.onOrAfter(VERSION_HASHED_TOKENS)) { + final String userTokenId = hashTokenString(accessToken); + getUserTokenFromId(userTokenId, version, listener); + } else { + getUserTokenFromId(accessToken, version, listener); + } } else { // The token was created in a < VERSION_ACCESS_TOKENS_UUIDS cluster so we need to decrypt it to get the tokenId - if (in.available() < MINIMUM_BASE64_BYTES) { - logger.debug("invalid token, smaller than [{}] bytes", MINIMUM_BASE64_BYTES); + if (in.available() < LEGACY_MINIMUM_BYTES) { + logger.debug("invalid token, smaller than [{}] bytes", LEGACY_MINIMUM_BYTES); listener.onResponse(null); return; } @@ -709,8 +786,12 @@ public final class TokenService { /** * Called by the transport action in order to start the process of refreshing a token. + * + * @param refreshToken The refresh token as provided by the client + * @param listener The listener to call upon completion with a {@link Tuple} containing the + * serialized access token and serialized refresh token as these will be returned to the client */ - public void refreshToken(String refreshToken, ActionListener> listener) { + public void refreshToken(String refreshToken, ActionListener> listener) { ensureEnabled(); final Instant refreshRequested = clock.instant(); final Iterator backoff = DEFAULT_BACKOFF.iterator(); @@ -718,36 +799,49 @@ public final class TokenService { backoff, ActionListener.wrap(tokenDocHit -> { final Authentication clientAuth = Authentication.readFromContext(client.threadPool().getThreadContext()); - innerRefresh(tokenDocHit.getId(), tokenDocHit.getSourceAsMap(), tokenDocHit.getSeqNo(), tokenDocHit.getPrimaryTerm(), - clientAuth, backoff, refreshRequested, listener); + innerRefresh(refreshToken, tokenDocHit.getId(), tokenDocHit.getSourceAsMap(), tokenDocHit.getSeqNo(), + tokenDocHit.getPrimaryTerm(), + clientAuth, backoff, refreshRequested, listener); }, listener::onFailure)); } /** - * Inferes the format and version of the passed in {@code refreshToken}. Delegates the actual search of the token document to + * Infers the format and version of the passed in {@code refreshToken}. Delegates the actual search of the token document to * {@code #findTokenFromRefreshToken(String, SecurityIndexManager, Iterator, ActionListener)} . */ private void findTokenFromRefreshToken(String refreshToken, Iterator backoff, ActionListener listener) { - if (refreshToken.length() == TOKEN_ID_LENGTH) { + if (refreshToken.length() == TOKEN_LENGTH) { // first check if token has the old format before the new version-prepended one logger.debug("Assuming an unversioned refresh token [{}], generated for node versions" - + " prior to the introduction of the version-header format.", refreshToken); + + " prior to the introduction of the version-header format.", refreshToken); findTokenFromRefreshToken(refreshToken, securityMainIndex, backoff, listener); } else { - try { - final Tuple versionAndRefreshTokenTuple = unpackVersionAndPayload(refreshToken); - final Version refreshTokenVersion = versionAndRefreshTokenTuple.v1(); - final String unencodedRefreshToken = versionAndRefreshTokenTuple.v2(); - if (false == refreshTokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED) - || unencodedRefreshToken.length() != TOKEN_ID_LENGTH) { - logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, refreshTokenVersion); + if (refreshToken.length() == HASHED_TOKEN_LENGTH) { + logger.debug("Assuming a hashed refresh token [{}] retrieved from the tokens index", refreshToken); + findTokenFromRefreshToken(refreshToken, securityTokensIndex, backoff, listener); + } else { + logger.debug("Assuming a refresh token [{}] provided from a client", refreshToken); + try { + final Tuple versionAndRefreshTokenTuple = unpackVersionAndPayload(refreshToken); + final Version refreshTokenVersion = versionAndRefreshTokenTuple.v1(); + final String unencodedRefreshToken = versionAndRefreshTokenTuple.v2(); + if (refreshTokenVersion.before(VERSION_TOKENS_INDEX_INTRODUCED) || unencodedRefreshToken.length() != TOKEN_LENGTH) { + logger.debug("Decoded refresh token [{}] with version [{}] is invalid.", unencodedRefreshToken, + refreshTokenVersion); + listener.onFailure(malformedTokenException()); + } else { + // TODO Remove this conditional after backporting to 7.x + if (refreshTokenVersion.onOrAfter(VERSION_HASHED_TOKENS)) { + final String hashedRefreshToken = hashTokenString(unencodedRefreshToken); + findTokenFromRefreshToken(hashedRefreshToken, securityTokensIndex, backoff, listener); + } else { + findTokenFromRefreshToken(unencodedRefreshToken, securityTokensIndex, backoff, listener); + } + } + } catch (IOException e) { + logger.debug(() -> new ParameterizedMessage("Could not decode refresh token [{}].", refreshToken), e); listener.onFailure(malformedTokenException()); - } else { - findTokenFromRefreshToken(unencodedRefreshToken, securityTokensIndex, backoff, listener); } - } catch (IOException e) { - logger.debug("Could not decode refresh token [" + refreshToken + "].", e); - listener.onFailure(malformedTokenException()); } } } @@ -763,7 +857,7 @@ public final class TokenService { final Consumer maybeRetryOnFailure = ex -> { if (backoff.hasNext()) { final TimeValue backofTimeValue = backoff.next(); - logger.debug("retrying after [" + backofTimeValue + "] back off"); + logger.debug("retrying after [{}] back off", backofTimeValue); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() .preserveContext(() -> findTokenFromRefreshToken(refreshToken, tokensIndexManager, backoff, listener)); client.threadPool().schedule(retryWithContextRunnable, backofTimeValue, GENERIC); @@ -821,13 +915,14 @@ public final class TokenService { * supersedes this one. The new document that contains the new access token and refresh token is created and finally the new access * token and refresh token are returned to the listener. */ - private void innerRefresh(String tokenDocId, Map source, long seqNo, long primaryTerm, Authentication clientAuth, - Iterator backoff, Instant refreshRequested, ActionListener> listener) { + private void innerRefresh(String refreshToken, String tokenDocId, Map source, long seqNo, long primaryTerm, + Authentication clientAuth, Iterator backoff, Instant refreshRequested, + ActionListener> listener) { logger.debug("Attempting to refresh token stored in token document [{}]", tokenDocId); final Consumer onFailure = ex -> listener.onFailure(traceLog("refresh token", tokenDocId, ex)); final Tuple> checkRefreshResult; try { - checkRefreshResult = checkTokenDocumentForRefresh(clock.instant(), clientAuth, source); + checkRefreshResult = checkTokenDocumentForRefresh(refreshRequested, clientAuth, source); } catch (DateTimeException | IllegalStateException e) { onFailure.accept(new ElasticsearchSecurityException("invalid token document", e)); return; @@ -838,23 +933,29 @@ public final class TokenService { } final RefreshTokenStatus refreshTokenStatus = checkRefreshResult.v1(); if (refreshTokenStatus.isRefreshed()) { - logger.debug("Token document [{}] was recently refreshed, when a new token document [{}] was generated. Reusing that result.", - tokenDocId, refreshTokenStatus.getSupersededBy()); - getSupersedingTokenDocAsyncWithRetry(refreshTokenStatus, backoff, listener); + logger.debug("Token document [{}] was recently refreshed, when a new token document was generated. Reusing that result.", + tokenDocId); + decryptAndReturnSupersedingTokens(refreshToken, refreshTokenStatus, listener); } else { - final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newAccessTokenString = UUIDs.randomBase64UUID(); + final String newRefreshTokenString = UUIDs.randomBase64UUID(); final Version newTokenVersion = getTokenVersionCompatibility(); final Map updateMap = new HashMap<>(); updateMap.put("refreshed", true); - updateMap.put("refresh_time", clock.instant().toEpochMilli()); - if (newTokenVersion.onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - // the superseding token document reference is formated as "|"; - // for now, only the ".security-tokens|" is a valid reference format - updateMap.put("superseded_by", securityTokensIndex.aliasName() + "|" + getTokenDocumentId(newUserTokenId)); - } else { - // preservers the format of the reference (without the alias prefix) - // so that old nodes in a mixed cluster can still understand it - updateMap.put("superseded_by", getTokenDocumentId(newUserTokenId)); + if (newTokenVersion.onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { + updateMap.put("refresh_time", clock.instant().toEpochMilli()); + try { + final byte[] iv = getRandomBytes(IV_BYTES); + final byte[] salt = getRandomBytes(SALT_BYTES); + String encryptedAccessAndRefreshToken = encryptSupersedingTokens(newAccessTokenString, + newRefreshTokenString, refreshToken, iv, salt); + updateMap.put("superseding.encrypted_tokens", encryptedAccessAndRefreshToken); + updateMap.put("superseding.encryption_iv", Base64.getEncoder().encodeToString(iv)); + updateMap.put("superseding.encryption_salt", Base64.getEncoder().encodeToString(salt)); + } catch (GeneralSecurityException e) { + logger.warn("could not encrypt access token and refresh token string", e); + onFailure.accept(invalidGrantException("could not refresh the requested token")); + } } assert seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO : "expected an assigned sequence number"; assert primaryTerm != SequenceNumbers.UNASSIGNED_PRIMARY_TERM : "expected an assigned primary term"; @@ -875,14 +976,15 @@ public final class TokenService { updateResponse.getGetResult().sourceAsMap())); final Tuple parsedTokens = parseTokensFromDocument(source, null); final UserToken toRefreshUserToken = parsedTokens.v1(); - createOAuth2Tokens(newUserTokenId, newTokenVersion, getTokensIndexForVersion(newTokenVersion), - toRefreshUserToken.getAuthentication(), clientAuth, toRefreshUserToken.getMetadata(), true, listener); + createOAuth2Tokens(newAccessTokenString, newRefreshTokenString, newTokenVersion, + getTokensIndexForVersion(newTokenVersion), toRefreshUserToken.getAuthentication(), clientAuth, + toRefreshUserToken.getMetadata(), listener); } else if (backoff.hasNext()) { logger.info("failed to update the original token document [{}], the update result was [{}]. Retrying", tokenDocId, updateResponse.getResult()); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> innerRefresh(tokenDocId, source, seqNo, primaryTerm, clientAuth, backoff, - refreshRequested, listener)); + .preserveContext(() -> innerRefresh(refreshToken, tokenDocId, source, seqNo, primaryTerm, clientAuth, + backoff, refreshRequested, listener)); client.threadPool().schedule(retryWithContextRunnable, backoff.next(), GENERIC); } else { logger.info("failed to update the original token document [{}] after all retries, the update result was [{}]. ", @@ -898,8 +1000,8 @@ public final class TokenService { @Override public void onResponse(GetResponse response) { if (response.isExists()) { - innerRefresh(tokenDocId, response.getSource(), response.getSeqNo(), response.getPrimaryTerm(), - clientAuth, backoff, refreshRequested, listener); + innerRefresh(refreshToken, tokenDocId, response.getSource(), response.getSeqNo(), + response.getPrimaryTerm(), clientAuth, backoff, refreshRequested, listener); } else { logger.warn("could not find token document [{}] for refresh", tokenDocId); onFailure.accept(invalidGrantException("could not refresh the requested token")); @@ -927,8 +1029,8 @@ public final class TokenService { if (backoff.hasNext()) { logger.debug("failed to update the original token document [{}], retrying", tokenDocId); final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> innerRefresh(tokenDocId, source, seqNo, primaryTerm, clientAuth, backoff, - refreshRequested, listener)); + .preserveContext(() -> innerRefresh(refreshToken, tokenDocId, source, seqNo, primaryTerm, + clientAuth, backoff, refreshRequested, listener)); client.threadPool().schedule(retryWithContextRunnable, backoff.next(), GENERIC); } else { logger.warn("failed to update the original token document [{}], after all retries", tokenDocId); @@ -941,72 +1043,47 @@ public final class TokenService { } } - private void getSupersedingTokenDocAsyncWithRetry(RefreshTokenStatus refreshTokenStatus, Iterator backoff, - ActionListener> listener) { - final Consumer onFailure = ex -> listener - .onFailure(traceLog("get superseding token", refreshTokenStatus.getSupersededBy(), ex)); - getSupersedingTokenDocAsync(refreshTokenStatus, new ActionListener() { - private final Consumer maybeRetryOnFailure = ex -> { - if (backoff.hasNext()) { - final TimeValue backofTimeValue = backoff.next(); - logger.debug("retrying after [" + backofTimeValue + "] back off"); - final Runnable retryWithContextRunnable = client.threadPool().getThreadContext() - .preserveContext(() -> getSupersedingTokenDocAsync(refreshTokenStatus, this)); - client.threadPool().schedule(retryWithContextRunnable, backofTimeValue, GENERIC); - } else { - logger.warn("back off retries exhausted"); - onFailure.accept(ex); - } - }; - - @Override - public void onResponse(GetResponse response) { - if (response.isExists()) { - logger.debug("found superseding token document [{}] in index [{}] by following the [{}] reference", response.getId(), - response.getIndex(), refreshTokenStatus.getSupersededBy()); - final Tuple parsedTokens; - try { - parsedTokens = parseTokensFromDocument(response.getSource(), null); - } catch (IllegalStateException | DateTimeException e) { - logger.error("unable to decode existing user token", e); - listener.onFailure(new ElasticsearchSecurityException("could not refresh the requested token", e)); - return; - } - listener.onResponse(parsedTokens); - } else { - // We retry this since the creation of the superseding token document might already be in flight but not - // yet completed, triggered by a refresh request that came a few milliseconds ago - logger.info("could not find superseding token document from [{}] reference, retrying", - refreshTokenStatus.getSupersededBy()); - maybeRetryOnFailure.accept(invalidGrantException("could not refresh the requested token")); - } + /** + * Decrypts the values of the superseding access token and the refresh token, using a key derived from the superseded refresh token. It + * encodes the version and serializes the tokens before calling the listener, in the same manner as {@link #createOAuth2Tokens } does. + * + * @param refreshToken The refresh token that the user sent in the request, used to derive the decryption key + * @param refreshTokenStatus The {@link RefreshTokenStatus} containing information about the superseding tokens as retrieved from the + * index + * @param listener The listener to call upon completion with a {@link Tuple} containing the + * serialized access token and serialized refresh token as these will be returned to the client + */ + void decryptAndReturnSupersedingTokens(String refreshToken, RefreshTokenStatus refreshTokenStatus, + ActionListener> listener) { + final byte[] iv = Base64.getDecoder().decode(refreshTokenStatus.getIv()); + final byte[] salt = Base64.getDecoder().decode(refreshTokenStatus.getSalt()); + final byte[] encryptedSupersedingTokens = Base64.getDecoder().decode(refreshTokenStatus.getSupersedingTokens()); + try { + Cipher cipher = getDecryptionCipher(iv, refreshToken, salt); + final String supersedingTokens = new String(cipher.doFinal(encryptedSupersedingTokens), StandardCharsets.UTF_8); + final String[] decryptedTokens = supersedingTokens.split("\\|"); + if (decryptedTokens.length != 2) { + logger.warn("Decrypted tokens string is not correctly formatted"); + listener.onFailure(invalidGrantException("could not refresh the requested token")); } - - @Override - public void onFailure(Exception e) { - if (isShardNotAvailableException(e)) { - logger.info("could not find superseding token document from reference [{}], retrying", - refreshTokenStatus.getSupersededBy()); - maybeRetryOnFailure.accept(invalidGrantException("could not refresh the requested token")); - } else { - logger.warn("could not find superseding token document from reference [{}]", refreshTokenStatus.getSupersededBy()); - onFailure.accept(invalidGrantException("could not refresh the requested token")); - } - } - }); + listener.onResponse(new Tuple<>(prependVersionAndEncodeAccessToken(refreshTokenStatus.getVersion(), decryptedTokens[0]), + prependVersionAndEncodeRefreshToken(refreshTokenStatus.getVersion(), decryptedTokens[1]))); + } catch (GeneralSecurityException | IOException e) { + logger.warn("Could not get stored superseding token values", e); + listener.onFailure(invalidGrantException("could not refresh the requested token")); + } } - private void getSupersedingTokenDocAsync(RefreshTokenStatus refreshTokenStatus, ActionListener listener) { - final String supersedingDocReference = refreshTokenStatus.getSupersededBy(); - if (supersedingDocReference.startsWith(securityTokensIndex.aliasName() + "|")) { - // superseding token doc is stored on the new tokens index, irrespective of where the superseded token doc resides - final String supersedingDocId = supersedingDocReference.substring(securityTokensIndex.aliasName().length() + 1); - getTokenDocAsync(supersedingDocId, securityTokensIndex, listener); - } else { - assert false == supersedingDocReference - .contains("|") : "The superseding doc reference appears to contain an alias name but should not"; - getTokenDocAsync(supersedingDocReference, securityMainIndex, listener); - } + /* + * Encrypts the values of the superseding access token and the refresh token, using a key derived from the superseded refresh token. + * The tokens are concatenated to a string separated with `|` before encryption so that we only perform one encryption operation + * and that we only need to store one field + */ + String encryptSupersedingTokens(String supersedingAccessToken, String supersedingRefreshToken, + String refreshToken, byte[] iv, byte[] salt) throws GeneralSecurityException { + Cipher cipher = getEncryptionCipher(iv, refreshToken, salt); + final String supersedingTokens = supersedingAccessToken + "|" + supersedingRefreshToken; + return Base64.getEncoder().encodeToString(cipher.doFinal(supersedingTokens.getBytes(StandardCharsets.UTF_8))); } private void getTokenDocAsync(String tokenDocId, SecurityIndexManager tokensIndex, ActionListener listener) { @@ -1016,7 +1093,7 @@ public final class TokenService { () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, listener, client::get)); } - private Version getTokenVersionCompatibility() { + Version getTokenVersionCompatibility() { // newly minted tokens are compatible with the min node version in the cluster return clusterService.state().nodes().getMinNodeVersion(); } @@ -1029,13 +1106,13 @@ public final class TokenService { * A refresh token has a fixed maximum lifetime of {@code ExpiredTokenRemover#MAXIMUM_TOKEN_LIFETIME_HOURS} hours. This checks if the * token document represents a valid token wrt this time interval. */ - private static Optional checkTokenDocumentExpired(Instant now, Map source) { - final Long creationEpochMilli = (Long) source.get("creation_time"); + private static Optional checkTokenDocumentExpired(Instant refreshRequested, Map src) { + final Long creationEpochMilli = (Long) src.get("creation_time"); if (creationEpochMilli == null) { throw new IllegalStateException("token document is missing creation time value"); } else { final Instant creationTime = Instant.ofEpochMilli(creationEpochMilli); - if (now.isAfter(creationTime.plus(ExpiredTokenRemover.MAXIMUM_TOKEN_LIFETIME_HOURS, ChronoUnit.HOURS))) { + if (refreshRequested.isAfter(creationTime.plus(ExpiredTokenRemover.MAXIMUM_TOKEN_LIFETIME_HOURS, ChronoUnit.HOURS))) { return Optional.of(invalidGrantException("token document has expired")); } else { return Optional.empty(); @@ -1048,17 +1125,17 @@ public final class TokenService { * parsed {@code RefreshTokenStatus} together with an {@code Optional} validation exception that encapsulates the various logic about * when and by who a token can be refreshed. */ - private static Tuple> checkTokenDocumentForRefresh(Instant now, - Authentication clientAuth, Map source) throws IllegalStateException, DateTimeException { + private static Tuple> checkTokenDocumentForRefresh( + Instant refreshRequested, Authentication clientAuth, Map source) throws IllegalStateException, DateTimeException { final RefreshTokenStatus refreshTokenStatus = RefreshTokenStatus.fromSourceMap(getRefreshTokenSourceMap(source)); final UserToken userToken = UserToken.fromSourceMap(getUserTokenSourceMap(source)); refreshTokenStatus.setVersion(userToken.getVersion()); - final ElasticsearchSecurityException validationException = checkTokenDocumentExpired(now, source).orElseGet(() -> { + final ElasticsearchSecurityException validationException = checkTokenDocumentExpired(refreshRequested, source).orElseGet(() -> { if (refreshTokenStatus.isInvalidated()) { return invalidGrantException("token has been invalidated"); } else { return checkClientCanRefresh(refreshTokenStatus, clientAuth) - .orElse(checkMultipleRefreshes(now, refreshTokenStatus).orElse(null)); + .orElse(checkMultipleRefreshes(refreshRequested, refreshTokenStatus).orElse(null)); } }); return new Tuple<>(refreshTokenStatus, Optional.ofNullable(validationException)); @@ -1111,13 +1188,14 @@ public final class TokenService { * @return An {@code Optional} containing the exception in case this refresh token cannot be reused, or an empty Optional if * refreshing is allowed. */ - private static Optional checkMultipleRefreshes(Instant now, RefreshTokenStatus refreshTokenStatus) { + private static Optional checkMultipleRefreshes(Instant refreshRequested, + RefreshTokenStatus refreshTokenStatus) { if (refreshTokenStatus.isRefreshed()) { if (refreshTokenStatus.getVersion().onOrAfter(VERSION_MULTIPLE_CONCURRENT_REFRESHES)) { - if (now.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { + if (refreshRequested.isAfter(refreshTokenStatus.getRefreshInstant().plus(30L, ChronoUnit.SECONDS))) { return Optional.of(invalidGrantException("token has already been refreshed more than 30 seconds in the past")); } - if (now.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { + if (refreshRequested.isBefore(refreshTokenStatus.getRefreshInstant().minus(30L, ChronoUnit.SECONDS))) { return Optional .of(invalidGrantException("token has been refreshed more than 30 seconds in the future, clock skew too great")); } @@ -1269,7 +1347,7 @@ public final class TokenService { private BytesReference createTokenDocument(UserToken userToken, @Nullable String refreshToken, @Nullable Authentication originatingClientAuth) { assert refreshToken == null || originatingClientAuth != null : "non-null refresh token " + refreshToken - + " requires non-null client authn " + originatingClientAuth; + + " requires non-null client authn " + originatingClientAuth; try (XContentBuilder builder = XContentFactory.jsonBuilder()) { builder.startObject(); builder.field("doc_type", TOKEN_DOC_TYPE); @@ -1332,21 +1410,14 @@ public final class TokenService { */ private Tuple parseTokensFromDocument(Map source, @Nullable Predicate> filter) throws IllegalStateException, DateTimeException { - final String plainRefreshToken = (String) ((Map) source.get("refresh_token")).get("token"); + final String hashedRefreshToken = (String) ((Map) source.get("refresh_token")).get("token"); final Map userTokenSource = (Map) ((Map) source.get("access_token")).get("user_token"); if (null != filter && filter.test(userTokenSource) == false) { return null; } final UserToken userToken = UserToken.fromSourceMap(userTokenSource); - if (userToken.getVersion().onOrAfter(VERSION_TOKENS_INDEX_INTRODUCED)) { - final String versionedRefreshToken = plainRefreshToken != null ? - prependVersionAndEncode(userToken.getVersion(), plainRefreshToken) : null; - return new Tuple<>(userToken, versionedRefreshToken); - } else { - // do not prepend version to refresh token as the audience node version cannot deal with it - return new Tuple<>(userToken, plainRefreshToken); - } + return new Tuple<>(userToken, hashedRefreshToken); } private static String getTokenDocumentId(UserToken userToken) { @@ -1450,7 +1521,7 @@ public final class TokenService { return expirationDelay; } - private Instant getExpirationTime() { + Instant getExpirationTime() { return clock.instant().plusSeconds(expirationDelay.getSeconds()); } @@ -1478,38 +1549,34 @@ public final class TokenService { return null; } - /** - * Serializes a token to a String containing the minimum compatible node version for decoding it back and either an encrypted - * representation of the token id for versions earlier to {@code #VERSION_ACCESS_TOKENS_UUIDS} or the token itself for versions after - * {@code #VERSION_ACCESS_TOKENS_UUIDS} - */ - public String getAccessTokenAsString(UserToken userToken) throws IOException, GeneralSecurityException { - if (userToken.getVersion().onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { + String prependVersionAndEncodeAccessToken(Version version, String accessToken) throws IOException, GeneralSecurityException { + if (version.onOrAfter(VERSION_ACCESS_TOKENS_AS_UUIDS)) { try (ByteArrayOutputStream os = new ByteArrayOutputStream(MINIMUM_BASE64_BYTES); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(userToken.getVersion()); - Version.writeVersion(userToken.getVersion(), out); - out.writeString(userToken.getId()); + out.setVersion(version); + Version.writeVersion(version, out); + out.writeString(accessToken); return new String(os.toByteArray(), StandardCharsets.UTF_8); } } else { // we know that the minimum length is larger than the default of the ByteArrayOutputStream so set the size to this explicitly - try (ByteArrayOutputStream os = new ByteArrayOutputStream(MINIMUM_BASE64_BYTES); + try (ByteArrayOutputStream os = new ByteArrayOutputStream(LEGACY_MINIMUM_BASE64_BYTES); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(userToken.getVersion()); + out.setVersion(version); KeyAndCache keyAndCache = keyCache.activeKeyCache; - Version.writeVersion(userToken.getVersion(), out); + Version.writeVersion(version, out); out.writeByteArray(keyAndCache.getSalt().bytes); out.writeByteArray(keyAndCache.getKeyHash().bytes); - final byte[] initializationVector = getNewInitializationVector(); + final byte[] initializationVector = getRandomBytes(IV_BYTES); out.writeByteArray(initializationVector); try (CipherOutputStream encryptedOutput = - new CipherOutputStream(out, getEncryptionCipher(initializationVector, keyAndCache, userToken.getVersion())); + new CipherOutputStream(out, getEncryptionCipher(initializationVector, keyAndCache, version)); StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput)) { - encryptedStreamOutput.setVersion(userToken.getVersion()); - encryptedStreamOutput.writeString(userToken.getId()); + encryptedStreamOutput.setVersion(version); + encryptedStreamOutput.writeString(accessToken); + // StreamOutput needs to be closed explicitly because it wraps CipherOutputStream encryptedStreamOutput.close(); return new String(os.toByteArray(), StandardCharsets.UTF_8); } @@ -1517,7 +1584,7 @@ public final class TokenService { } } - private static String prependVersionAndEncode(Version version, String payload) { + static String prependVersionAndEncodeRefreshToken(Version version, String payload) { try (ByteArrayOutputStream os = new ByteArrayOutputStream(); OutputStream base64 = Base64.getEncoder().wrap(os); StreamOutput out = new OutputStreamStreamOutput(base64)) { @@ -1563,6 +1630,17 @@ public final class TokenService { return cipher; } + /** + * Initialize the encryption cipher using the provided password to derive the encryption key. + */ + Cipher getEncryptionCipher(byte[] iv, String password, byte[] salt) throws GeneralSecurityException { + SecretKey key = computeSecretKey(password.toCharArray(), salt, TOKENS_ENCRYPTION_KEY_ITERATIONS); + Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER); + cipher.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv), secureRandom); + cipher.updateAAD(salt); + return cipher; + } + private void getKeyAsync(BytesKey decodedSalt, KeyAndCache keyAndCache, ActionListener listener) { final SecretKey decodeKey = keyAndCache.getKey(decodedSalt); if (decodeKey != null) { @@ -1595,21 +1673,31 @@ public final class TokenService { return cipher; } - // Package private for testing - byte[] getNewInitializationVector() { - final byte[] initializationVector = new byte[IV_BYTES]; - secureRandom.nextBytes(initializationVector); - return initializationVector; + /** + * Initialize the decryption cipher using the provided password to derive the decryption key. + */ + private Cipher getDecryptionCipher(byte[] iv, String password, byte[] salt) throws GeneralSecurityException { + SecretKey key = computeSecretKey(password.toCharArray(), salt, TOKENS_ENCRYPTION_KEY_ITERATIONS); + Cipher cipher = Cipher.getInstance(ENCRYPTION_CIPHER); + cipher.init(Cipher.DECRYPT_MODE, key, new GCMParameterSpec(128, iv), secureRandom); + cipher.updateAAD(salt); + return cipher; + } + + byte[] getRandomBytes(int length) { + final byte[] bytes = new byte[length]; + secureRandom.nextBytes(bytes); + return bytes; } /** * Generates a secret key based off of the provided password and salt. - * This method is computationally expensive. + * This method can be computationally expensive. */ - static SecretKey computeSecretKey(char[] rawPassword, byte[] salt) + static SecretKey computeSecretKey(char[] rawPassword, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeySpecException { SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(KDF_ALGORITHM); - PBEKeySpec keySpec = new PBEKeySpec(rawPassword, salt, ITERATIONS, 128); + PBEKeySpec keySpec = new PBEKeySpec(rawPassword, salt, iterations, 128); SecretKey tmp = secretKeyFactory.generateSecret(keySpec); return new SecretKeySpec(tmp.getEncoded(), "AES"); } @@ -2003,7 +2091,7 @@ public final class TokenService { .setMaximumWeight(500L) .build(); try { - SecretKey secretKey = computeSecretKey(keyAndTimestamp.getKey().getChars(), salt.bytes); + SecretKey secretKey = computeSecretKey(keyAndTimestamp.getKey().getChars(), salt.bytes, TOKEN_SERVICE_KEY_ITERATIONS); keyCache.put(salt, secretKey); } catch (Exception e) { throw new IllegalStateException(e); @@ -2019,7 +2107,7 @@ public final class TokenService { public SecretKey getOrComputeKey(BytesKey decodedSalt) throws ExecutionException { return keyCache.computeIfAbsent(decodedSalt, (salt) -> { try (SecureString closeableChars = keyAndTimestamp.getKey().clone()) { - return computeSecretKey(closeableChars.getChars(), salt.bytes); + return computeSecretKey(closeableChars.getChars(), salt.bytes, TOKEN_SERVICE_KEY_ITERATIONS); } }); } @@ -2074,24 +2162,32 @@ public final class TokenService { /** * Contains metadata associated with the refresh token that is used for validity checks, but does not contain the proper token string. */ - private static final class RefreshTokenStatus { + static final class RefreshTokenStatus { private final boolean invalidated; private final String associatedUser; private final String associatedRealm; private final boolean refreshed; @Nullable private final Instant refreshInstant; - @Nullable private final String supersededBy; + @Nullable + private final String supersedingTokens; + @Nullable + private final String iv; + @Nullable + private final String salt; private Version version; - private RefreshTokenStatus(boolean invalidated, String associatedUser, String associatedRealm, boolean refreshed, - Instant refreshInstant, String supersededBy) { + // pkg-private for testing + RefreshTokenStatus(boolean invalidated, String associatedUser, String associatedRealm, boolean refreshed, Instant refreshInstant, + String supersedingTokens, String iv, String salt) { this.invalidated = invalidated; this.associatedUser = associatedUser; this.associatedRealm = associatedRealm; this.refreshed = refreshed; this.refreshInstant = refreshInstant; - this.supersededBy = supersededBy; + this.supersedingTokens = supersedingTokens; + this.iv = iv; + this.salt = salt; } boolean isInvalidated() { @@ -2114,8 +2210,19 @@ public final class TokenService { return refreshInstant; } - @Nullable String getSupersededBy() { - return supersededBy; + @Nullable + String getSupersedingTokens() { + return supersedingTokens; + } + + @Nullable + String getIv() { + return iv; + } + + @Nullable + String getSalt() { + return salt; } Version getVersion() { @@ -2149,8 +2256,11 @@ public final class TokenService { } final Long refreshEpochMilli = (Long) refreshTokenSource.get("refresh_time"); final Instant refreshInstant = refreshEpochMilli == null ? null : Instant.ofEpochMilli(refreshEpochMilli); - final String supersededBy = (String) refreshTokenSource.get("superseded_by"); - return new RefreshTokenStatus(invalidated, associatedUser, associatedRealm, refreshed, refreshInstant, supersededBy); + final String supersedingTokens = (String) refreshTokenSource.get("superseding.encrypted_tokens"); + final String iv = (String) refreshTokenSource.get("superseding.encryption_iv"); + final String salt = (String) refreshTokenSource.get("superseding.encryption_salt"); + return new RefreshTokenStatus(invalidated, associatedUser, associatedRealm, refreshed, refreshInstant, supersedingTokens, + iv, salt); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java index fe8b3823120..211ef0bcb19 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/UserToken.java @@ -50,7 +50,7 @@ public final class UserToken implements Writeable, ToXContentObject { /** * Create a new token with an autogenerated id */ - UserToken(Version version, Authentication authentication, Instant expirationTime, Map metadata) { + private UserToken(Version version, Authentication authentication, Instant expirationTime, Map metadata) { this(UUIDs.randomBase64UUID(), version, authentication, expirationTime, metadata); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java index 2505f55c98d..6278221a362 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/oidc/TransportOpenIdConnectLogoutActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -47,7 +48,6 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectRealm; import org.elasticsearch.xpack.security.authc.oidc.OpenIdConnectTestCase; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; @@ -195,20 +195,21 @@ public class TransportOpenIdConnectLogoutActionTests extends OpenIdConnectTestCa final JWT signedIdToken = generateIdToken(subject, randomAlphaOfLength(8), randomAlphaOfLength(8)); final User user = new User("oidc-user", new String[]{"superuser"}, null, null, null, true); final Authentication.RealmRef realmRef = new Authentication.RealmRef(oidcRealm.name(), OpenIdConnectRealmSettings.TYPE, "node01"); - final Authentication authentication = new Authentication(user, realmRef, null); - final Map tokenMetadata = new HashMap<>(); tokenMetadata.put("id_token_hint", signedIdToken.serialize()); tokenMetadata.put("oidc_realm", REALM_NAME); + final Authentication authentication = new Authentication(user, realmRef, null, null, Authentication.AuthenticationType.REALM, + tokenMetadata); - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, tokenMetadata, true, future); - final UserToken userToken = future.actionGet().v1(); - mockGetTokenFromId(userToken, false, client); - final String tokenString = tokenService.getAccessTokenAsString(userToken); + final PlainActionFuture> future = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, tokenMetadata, future); + final String accessToken = future.actionGet().v1(); + mockGetTokenFromId(tokenService, userTokenId, authentication, false, client); final OpenIdConnectLogoutRequest request = new OpenIdConnectLogoutRequest(); - request.setToken(tokenString); + request.setToken(accessToken); final PlainActionFuture listener = new PlainActionFuture<>(); action.doExecute(mock(Task.class), request, listener); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 3f4ac894208..6a9c487bf20 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -66,7 +67,6 @@ import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.saml.SamlLogoutRequestHandler; import org.elasticsearch.xpack.security.authc.saml.SamlNameId; import org.elasticsearch.xpack.security.authc.saml.SamlRealm; @@ -252,9 +252,14 @@ public class TransportSamlInvalidateSessionActionTests extends SamlTestCase { } public void testInvalidateCorrectTokensFromLogoutRequest() throws Exception { + final String userTokenId1 = UUIDs.randomBase64UUID(); + final String refreshToken1 = UUIDs.randomBase64UUID(); + final String userTokenId2 = UUIDs.randomBase64UUID(); + final String refreshToken2 = UUIDs.randomBase64UUID(); storeToken(logoutRequest.getNameId(), randomAlphaOfLength(10)); - final Tuple tokenToInvalidate1 = storeToken(logoutRequest.getNameId(), logoutRequest.getSession()); - final Tuple tokenToInvalidate2 = storeToken(logoutRequest.getNameId(), logoutRequest.getSession()); + final Tuple tokenToInvalidate1 = storeToken(userTokenId1, refreshToken1, logoutRequest.getNameId(), + logoutRequest.getSession()); + storeToken(userTokenId2, refreshToken2, logoutRequest.getNameId(), logoutRequest.getSession()); storeToken(new SamlNameId(NameID.PERSISTENT, randomAlphaOfLength(16), null, null, null), logoutRequest.getSession()); assertThat(indexRequests.size(), equalTo(4)); @@ -316,27 +321,27 @@ public class TransportSamlInvalidateSessionActionTests extends SamlTestCase { assertThat(filter1.get(1), instanceOf(TermQueryBuilder.class)); assertThat(((TermQueryBuilder) filter1.get(1)).fieldName(), equalTo("refresh_token.token")); assertThat(((TermQueryBuilder) filter1.get(1)).value(), - equalTo(TokenService.unpackVersionAndPayload(tokenToInvalidate1.v2()).v2())); + equalTo(TokenService.hashTokenString(TokenService.unpackVersionAndPayload(tokenToInvalidate1.v2()).v2()))); assertThat(bulkRequests.size(), equalTo(4)); // 4 updates (refresh-token + access-token) // Invalidate refresh token 1 assertThat(bulkRequests.get(0).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(0).requests().get(0).id(), equalTo("token_" + tokenToInvalidate1.v1().getId())); + assertThat(bulkRequests.get(0).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId1))); UpdateRequest updateRequest1 = (UpdateRequest) bulkRequests.get(0).requests().get(0); assertThat(updateRequest1.toString().contains("refresh_token"), equalTo(true)); // Invalidate access token 1 assertThat(bulkRequests.get(1).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(1).requests().get(0).id(), equalTo("token_" + tokenToInvalidate1.v1().getId())); + assertThat(bulkRequests.get(1).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId1))); UpdateRequest updateRequest2 = (UpdateRequest) bulkRequests.get(1).requests().get(0); assertThat(updateRequest2.toString().contains("access_token"), equalTo(true)); // Invalidate refresh token 2 assertThat(bulkRequests.get(2).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(2).requests().get(0).id(), equalTo("token_" + tokenToInvalidate2.v1().getId())); + assertThat(bulkRequests.get(2).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId2))); UpdateRequest updateRequest3 = (UpdateRequest) bulkRequests.get(2).requests().get(0); assertThat(updateRequest3.toString().contains("refresh_token"), equalTo(true)); // Invalidate access token 2 assertThat(bulkRequests.get(3).requests().get(0), instanceOf(UpdateRequest.class)); - assertThat(bulkRequests.get(3).requests().get(0).id(), equalTo("token_" + tokenToInvalidate2.v1().getId())); + assertThat(bulkRequests.get(3).requests().get(0).id(), equalTo("token_" + TokenService.hashTokenString(userTokenId2))); UpdateRequest updateRequest4 = (UpdateRequest) bulkRequests.get(3).requests().get(0); assertThat(updateRequest4.toString().contains("access_token"), equalTo(true)); } @@ -359,13 +364,19 @@ public class TransportSamlInvalidateSessionActionTests extends SamlTestCase { }; } - private Tuple storeToken(SamlNameId nameId, String session) throws IOException { + private Tuple storeToken(String userTokenId, String refreshToken, SamlNameId nameId, String session) { Authentication authentication = new Authentication(new User("bob"), new RealmRef("native", NativeRealmSettings.TYPE, "node01"), null); final Map metadata = samlRealm.createTokenMetadata(nameId, session); - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, metadata, true, future); + final PlainActionFuture> future = new PlainActionFuture<>(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, metadata, future); return future.actionGet(); } + private Tuple storeToken(SamlNameId nameId, String session) { + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + return storeToken(userTokenId, refreshToken, nameId, session); + } + } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 1652122bf6e..9b9dc79a29c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; @@ -55,7 +56,6 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; -import org.elasticsearch.xpack.security.authc.UserToken; import org.elasticsearch.xpack.security.authc.saml.SamlNameId; import org.elasticsearch.xpack.security.authc.saml.SamlRealm; import org.elasticsearch.xpack.security.authc.saml.SamlRealmTests; @@ -236,19 +236,21 @@ public class TransportSamlLogoutActionTests extends SamlTestCase { .map(); final User user = new User("punisher", new String[]{"superuser"}, null, null, userMetaData, true); final Authentication.RealmRef realmRef = new Authentication.RealmRef(samlRealm.name(), SamlRealmSettings.TYPE, "node01"); - final Authentication authentication = new Authentication(user, realmRef, null); - final Map tokenMetaData = samlRealm.createTokenMetadata( - new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session); + new SamlNameId(NameID.TRANSIENT, nameId, null, null, null), session); + final Authentication authentication = new Authentication(user, realmRef, null, null, Authentication.AuthenticationType.REALM, + tokenMetaData); - final PlainActionFuture> future = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, tokenMetaData, true, future); - final UserToken userToken = future.actionGet().v1(); - mockGetTokenFromId(userToken, false, client); - final String tokenString = tokenService.getAccessTokenAsString(userToken); + + final PlainActionFuture> future = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, tokenMetaData, future); + final String accessToken = future.actionGet().v1(); + mockGetTokenFromId(tokenService, userTokenId, authentication, false, client); final SamlLogoutRequest request = new SamlLogoutRequest(); - request.setToken(tokenString); + request.setToken(accessToken); final PlainActionFuture listener = new PlainActionFuture<>(); action.doExecute(mock(Task.class), request, listener); final SamlLogoutResponse response = listener.get(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index c7994888a26..67ce5ce2b27 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -1108,14 +1108,16 @@ public class AuthenticationServiceTests extends ESTestCase { User user = new User("_username", "r1"); final AtomicBoolean completed = new AtomicBoolean(false); final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); - tokenService.createOAuth2Tokens(expected, originatingAuth, Collections.emptyMap(), true, tokenFuture); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, expected, originatingAuth, Collections.emptyMap(), tokenFuture); } - String token = tokenService.getAccessTokenAsString(tokenFuture.get().v1()); + String token = tokenFuture.get().v1(); when(client.prepareMultiGet()).thenReturn(new MultiGetRequestBuilder(client, MultiGetAction.INSTANCE)); - mockGetTokenFromId(tokenFuture.get().v1(), false, client); + mockGetTokenFromId(tokenService, userTokenId, expected, false, client); when(securityIndex.isAvailable()).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { @@ -1191,13 +1193,15 @@ public class AuthenticationServiceTests extends ESTestCase { when(securityIndex.indexExists()).thenReturn(true); User user = new User("_username", "r1"); final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { Authentication originatingAuth = new Authentication(new User("creator"), new RealmRef("test", "test", "test"), null); - tokenService.createOAuth2Tokens(expected, originatingAuth, Collections.emptyMap(), true, tokenFuture); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, expected, originatingAuth, Collections.emptyMap(), tokenFuture); } - String token = tokenService.getAccessTokenAsString(tokenFuture.get().v1()); - mockGetTokenFromId(tokenFuture.get().v1(), true, client); + String token = tokenFuture.get().v1(); + mockGetTokenFromId(tokenService, userTokenId, expected, true, client); doAnswer(invocationOnMock -> { ((Runnable) invocationOnMock.getArguments()[1]).run(); return null; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 86468d25b62..38af62d288e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -28,8 +28,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -62,10 +60,7 @@ import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; -import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.OutputStream; -import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.time.Clock; import java.time.Instant; @@ -75,7 +70,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; -import javax.crypto.CipherOutputStream; import javax.crypto.SecretKey; import static java.time.Clock.systemUTC; @@ -169,15 +163,16 @@ public class TokenServiceTests extends ESTestCase { public void testAttachAndGetToken() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + tokenService.getAccessTokenAsString(token)); + requestContext.putHeader("Authorization", randomFrom("Bearer ", "BEARER ", "bearer ") + accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -214,16 +209,21 @@ public class TokenServiceTests extends ESTestCase { public void testRotateKey() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -240,15 +240,18 @@ public class TokenServiceTests extends ESTestCase { assertAuthentication(authentication, serialized.getAuthentication()); } - PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, newTokenFuture); - final UserToken newToken = newTokenFuture.get().v1(); - assertNotNull(newToken); - assertNotEquals(getDeprecatedAccessTokenString(tokenService, newToken), getDeprecatedAccessTokenString(tokenService, token)); + PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); + final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(newUserTokenId, newRefreshToken, authentication, authentication, Collections.emptyMap(), + newTokenFuture); + final String newAccessToken = newTokenFuture.get().v1(); + assertNotNull(newAccessToken); + assertNotEquals(newAccessToken, accessToken); requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, newToken)); - mockGetTokenFromId(newToken, false); + storeTokenHeader(requestContext, newAccessToken); + mockGetTokenFromId(tokenService, newUserTokenId, authentication, false); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -267,6 +270,10 @@ public class TokenServiceTests extends ESTestCase { public void testKeyExchange() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } int numRotations = randomIntBetween(1, 5); for (int i = 0; i < numRotations; i++) { rotateKeys(tokenService); @@ -275,20 +282,21 @@ public class TokenServiceTests extends ESTestCase { otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); otherTokenService.getAndValidateToken(requestContext, future); UserToken serialized = future.get(); - assertEquals(authentication, serialized.getAuthentication()); + assertAuthentication(serialized.getAuthentication(), authentication); } rotateKeys(tokenService); @@ -299,22 +307,27 @@ public class TokenServiceTests extends ESTestCase { PlainActionFuture future = new PlainActionFuture<>(); otherTokenService.getAndValidateToken(requestContext, future); UserToken serialized = future.get(); - assertEquals(authentication, serialized.getAuthentication()); + assertAuthentication(serialized.getAuthentication(), authentication); } } public void testPruneKeys() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.2.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -337,11 +350,14 @@ public class TokenServiceTests extends ESTestCase { assertAuthentication(authentication, serialized.getAuthentication()); } - PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, newTokenFuture); - final UserToken newToken = newTokenFuture.get().v1(); - assertNotNull(newToken); - assertNotEquals(getDeprecatedAccessTokenString(tokenService, newToken), getDeprecatedAccessTokenString(tokenService, token)); + PlainActionFuture> newTokenFuture = new PlainActionFuture<>(); + final String newUserTokenId = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(newUserTokenId, newRefreshToken, authentication, authentication, Collections.emptyMap(), + newTokenFuture); + final String newAccessToken = newTokenFuture.get().v1(); + assertNotNull(newAccessToken); + assertNotEquals(newAccessToken, accessToken); metaData = tokenService.pruneKeys(1); tokenService.refreshMetaData(metaData); @@ -354,8 +370,8 @@ public class TokenServiceTests extends ESTestCase { } requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, newToken)); - mockGetTokenFromId(newToken, false); + storeTokenHeader(requestContext, newAccessToken); + mockGetTokenFromId(tokenService, newUserTokenId, authentication, false); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); tokenService.getAndValidateToken(requestContext, future); @@ -367,16 +383,21 @@ public class TokenServiceTests extends ESTestCase { public void testPassphraseWorks() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, getDeprecatedAccessTokenString(tokenService, token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -396,29 +417,40 @@ public class TokenServiceTests extends ESTestCase { public void testGetTokenWhenKeyCacheHasExpired() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); + // This test only makes sense in mixed clusters with pre v7.1.0 nodes where the Key is actually used + if (null == oldNode) { + oldNode = addAnotherDataNodeWithVersion(this.clusterService, randomFrom(Version.V_6_7_0, Version.V_7_1_0)); + } Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - UserToken token = tokenFuture.get().v1(); - assertThat(getDeprecatedAccessTokenString(tokenService, token), notNullValue()); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + String accessToken = tokenFuture.get().v1(); + assertThat(accessToken, notNullValue()); tokenService.clearActiveKeyCache(); - assertThat(getDeprecatedAccessTokenString(tokenService, token), notNullValue()); + + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + accessToken = tokenFuture.get().v1(); + assertThat(accessToken, notNullValue()); } public void testInvalidatedToken() throws Exception { when(securityMainIndex.indexExists()).thenReturn(true); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - mockGetTokenFromId(token, true); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); + mockGetTokenFromId(tokenService, userTokenId, authentication, true); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { PlainActionFuture future = new PlainActionFuture<>(); @@ -437,8 +469,10 @@ public class TokenServiceTests extends ESTestCase { public void testComputeSecretKeyIsConsistent() throws Exception { byte[] saltArr = new byte[32]; random().nextBytes(saltArr); - SecretKey key = TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr); - SecretKey key2 = TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr); + SecretKey key = + TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr, TokenService.TOKEN_SERVICE_KEY_ITERATIONS); + SecretKey key2 = + TokenService.computeSecretKey("some random passphrase".toCharArray(), saltArr, TokenService.TOKEN_SERVICE_KEY_ITERATIONS); assertArrayEquals(key.getEncoded(), key2.getEncoded()); } @@ -469,14 +503,15 @@ public class TokenServiceTests extends ESTestCase { ClockMock clock = ClockMock.frozen(); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, clock); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - mockGetTokenFromId(token, false); - authentication = token.getAuthentication(); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken userToken = new UserToken(userTokenId, tokenService.getTokenVersionCompatibility(), authentication, + tokenService.getExpirationTime(), Collections.emptyMap()); + mockGetTokenFromId(userToken, false); + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // the clock is still frozen, so the cookie should be valid @@ -520,7 +555,7 @@ public class TokenServiceTests extends ESTestCase { TokenService tokenService = new TokenService(Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), false) .build(), - Clock.systemUTC(), client, licenseState, securityMainIndex, securityTokensIndex, clusterService); + Clock.systemUTC(), client, licenseState, securityMainIndex, securityTokensIndex, clusterService); IllegalStateException e = expectThrows(IllegalStateException.class, () -> tokenService.createOAuth2Tokens(null, null, null, true, null)); assertEquals("security tokens are not enabled", e.getMessage()); @@ -578,14 +613,15 @@ public class TokenServiceTests extends ESTestCase { public void testIndexNotAvailable() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - PlainActionFuture> tokenFuture = new PlainActionFuture<>(); - tokenService.createOAuth2Tokens(authentication, authentication, Collections.emptyMap(), true, tokenFuture); - final UserToken token = tokenFuture.get().v1(); - assertNotNull(token); - //mockGetTokenFromId(token, false); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String userTokenId = UUIDs.randomBase64UUID(); + final String refreshToken = UUIDs.randomBase64UUID(); + tokenService.createOAuth2Tokens(userTokenId, refreshToken, authentication, authentication, Collections.emptyMap(), tokenFuture); + final String accessToken = tokenFuture.get().v1(); + assertNotNull(accessToken); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(requestContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(requestContext, accessToken); doAnswer(invocationOnMock -> { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; @@ -621,34 +657,64 @@ public class TokenServiceTests extends ESTestCase { when(tokensIndex.isAvailable()).thenReturn(true); when(tokensIndex.indexExists()).thenReturn(true); - mockGetTokenFromId(token, false); + mockGetTokenFromId(tokenService, userTokenId, authentication, false); future = new PlainActionFuture<>(); tokenService.getAndValidateToken(requestContext, future); - assertEquals(future.get().getAuthentication(), token.getAuthentication()); + assertAuthentication(future.get().getAuthentication(), authentication); } } public void testGetAuthenticationWorksWithExpiredUserToken() throws Exception { TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - UserToken expired = new UserToken(authentication, Instant.now().minus(3L, ChronoUnit.DAYS)); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken expired = new UserToken(userTokenId, tokenService.getTokenVersionCompatibility(), authentication, + Instant.now().minus(3L, ChronoUnit.DAYS), Collections.emptyMap()); mockGetTokenFromId(expired, false); - String userTokenString = tokenService.getAccessTokenAsString(expired); + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); PlainActionFuture>> authFuture = new PlainActionFuture<>(); - tokenService.getAuthenticationAndMetaData(userTokenString, authFuture); + tokenService.getAuthenticationAndMetaData(accessToken, authFuture); Authentication retrievedAuth = authFuture.actionGet().v1(); - assertEquals(authentication, retrievedAuth); + assertAuthentication(authentication, retrievedAuth); + } + + public void testSupercedingTokenEncryption() throws Exception { + TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); + Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); + PlainActionFuture> tokenFuture = new PlainActionFuture<>(); + final String refrehToken = UUIDs.randomBase64UUID(); + final String newAccessToken = UUIDs.randomBase64UUID(); + final String newRefreshToken = UUIDs.randomBase64UUID(); + final byte[] iv = tokenService.getRandomBytes(TokenService.IV_BYTES); + final byte[] salt = tokenService.getRandomBytes(TokenService.SALT_BYTES); + final Version version = tokenService.getTokenVersionCompatibility(); + String encryptedTokens = tokenService.encryptSupersedingTokens(newAccessToken, newRefreshToken, refrehToken, iv, + salt); + TokenService.RefreshTokenStatus refreshTokenStatus = new TokenService.RefreshTokenStatus(false, + authentication.getUser().principal(), authentication.getAuthenticatedBy().getName(), true, Instant.now().minusSeconds(5L), + encryptedTokens, Base64.getEncoder().encodeToString(iv), Base64.getEncoder().encodeToString(salt)); + refreshTokenStatus.setVersion(version); + tokenService.decryptAndReturnSupersedingTokens(refrehToken, refreshTokenStatus, tokenFuture); + if (version.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + // previous versions serialized the access token encrypted and the cipher text was different each time (due to different IVs) + assertThat(tokenService.prependVersionAndEncodeAccessToken(version, newAccessToken), equalTo(tokenFuture.get().v1())); + } + assertThat(TokenService.prependVersionAndEncodeRefreshToken(version, newRefreshToken), equalTo(tokenFuture.get().v2())); } public void testCannotValidateTokenIfLicenseDoesNotAllowTokens() throws Exception { when(licenseState.isTokenServiceAllowed()).thenReturn(true); TokenService tokenService = createTokenService(tokenServiceEnabledSettings, Clock.systemUTC()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); - UserToken token = new UserToken(authentication, Instant.now().plusSeconds(180)); + final String userTokenId = UUIDs.randomBase64UUID(); + UserToken token = new UserToken(userTokenId, tokenService.getTokenVersionCompatibility(), authentication, + Instant.now().plusSeconds(180), Collections.emptyMap()); mockGetTokenFromId(token, false); - + final String accessToken = tokenService.prependVersionAndEncodeAccessToken(tokenService.getTokenVersionCompatibility(), userTokenId + ); final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - storeTokenHeader(threadContext, tokenService.getAccessTokenAsString(token)); + storeTokenHeader(threadContext, tokenService.prependVersionAndEncodeAccessToken(token.getVersion(), accessToken)); PlainActionFuture authFuture = new PlainActionFuture<>(); when(licenseState.isTokenServiceAllowed()).thenReturn(false); @@ -661,18 +727,30 @@ public class TokenServiceTests extends ESTestCase { return new TokenService(settings, clock, client, licenseState, securityMainIndex, securityTokensIndex, clusterService); } - private void mockGetTokenFromId(UserToken userToken, boolean isExpired) { - mockGetTokenFromId(userToken, isExpired, client); + private void mockGetTokenFromId(TokenService tokenService, String accessToken, Authentication authentication, boolean isExpired) { + mockGetTokenFromId(tokenService, accessToken, authentication, isExpired, client); } - public static void mockGetTokenFromId(UserToken userToken, boolean isExpired, Client client) { + public static void mockGetTokenFromId(TokenService tokenService, String userTokenId, Authentication authentication, boolean isExpired, + Client client) { doAnswer(invocationOnMock -> { GetRequest request = (GetRequest) invocationOnMock.getArguments()[0]; ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; GetResponse response = mock(GetResponse.class); - if (userToken.getId().equals(request.id().replace("token_", ""))) { + Version tokenVersion = tokenService.getTokenVersionCompatibility(); + final String possiblyHashedUserTokenId; + if (tokenVersion.onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + possiblyHashedUserTokenId = TokenService.hashTokenString(userTokenId); + } else { + possiblyHashedUserTokenId = userTokenId; + } + if (possiblyHashedUserTokenId.equals(request.id().replace("token_", ""))) { when(response.isExists()).thenReturn(true); Map sourceMap = new HashMap<>(); + final Authentication tokenAuth = new Authentication(authentication.getUser(), authentication.getAuthenticatedBy(), + authentication.getLookedUpBy(), tokenVersion, AuthenticationType.TOKEN, authentication.getMetadata()); + final UserToken userToken = new UserToken(possiblyHashedUserTokenId, tokenVersion, tokenAuth, + tokenService.getExpirationTime(), authentication.getMetadata()); try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { userToken.toXContent(builder, ToXContent.EMPTY_PARAMS); Map accessTokenMap = new HashMap<>(); @@ -688,35 +766,42 @@ public class TokenServiceTests extends ESTestCase { }).when(client).get(any(GetRequest.class), any(ActionListener.class)); } + private void mockGetTokenFromId(UserToken userToken, boolean isExpired) { + doAnswer(invocationOnMock -> { + GetRequest request = (GetRequest) invocationOnMock.getArguments()[0]; + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + GetResponse response = mock(GetResponse.class); + final String possiblyHashedUserTokenId; + if (userToken.getVersion().onOrAfter(TokenService.VERSION_ACCESS_TOKENS_AS_UUIDS)) { + possiblyHashedUserTokenId = TokenService.hashTokenString(userToken.getId()); + } else { + possiblyHashedUserTokenId = userToken.getId(); + } + if (possiblyHashedUserTokenId.equals(request.id().replace("token_", ""))) { + when(response.isExists()).thenReturn(true); + Map sourceMap = new HashMap<>(); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent())) { + userToken.toXContent(builder, ToXContent.EMPTY_PARAMS); + Map accessTokenMap = new HashMap<>(); + Map userTokenMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), + Strings.toString(builder), false); + userTokenMap.put("id", possiblyHashedUserTokenId); + accessTokenMap.put("user_token", userTokenMap); + accessTokenMap.put("invalidated", isExpired); + sourceMap.put("access_token", accessTokenMap); + } + when(response.getSource()).thenReturn(sourceMap); + } + listener.onResponse(response); + return Void.TYPE; + }).when(client).get(any(GetRequest.class), any(ActionListener.class)); + } + public static void assertAuthentication(Authentication result, Authentication expected) { assertEquals(expected.getUser(), result.getUser()); assertEquals(expected.getAuthenticatedBy(), result.getAuthenticatedBy()); assertEquals(expected.getLookedUpBy(), result.getLookedUpBy()); assertEquals(expected.getMetadata(), result.getMetadata()); - assertEquals(AuthenticationType.TOKEN, result.getAuthenticationType()); - } - - protected String getDeprecatedAccessTokenString(TokenService tokenService, UserToken userToken) throws IOException, - GeneralSecurityException { - try (ByteArrayOutputStream os = new ByteArrayOutputStream(TokenService.MINIMUM_BASE64_BYTES); - OutputStream base64 = Base64.getEncoder().wrap(os); - StreamOutput out = new OutputStreamStreamOutput(base64)) { - out.setVersion(Version.V_7_0_0); - TokenService.KeyAndCache keyAndCache = tokenService.getActiveKeyCache(); - Version.writeVersion(Version.V_7_0_0, out); - out.writeByteArray(keyAndCache.getSalt().bytes); - out.writeByteArray(keyAndCache.getKeyHash().bytes); - final byte[] initializationVector = tokenService.getNewInitializationVector(); - out.writeByteArray(initializationVector); - try (CipherOutputStream encryptedOutput = - new CipherOutputStream(out, tokenService.getEncryptionCipher(initializationVector, keyAndCache, Version.V_7_0_0)); - StreamOutput encryptedStreamOutput = new OutputStreamStreamOutput(encryptedOutput)) { - encryptedStreamOutput.setVersion(Version.V_7_0_0); - encryptedStreamOutput.writeString(userToken.getId()); - encryptedStreamOutput.close(); - return new String(os.toByteArray(), StandardCharsets.UTF_8); - } - } } private DiscoveryNode addAnotherDataNodeWithVersion(ClusterService clusterService, Version version) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java index 6086dc642d2..e51945cd904 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/HasherTests.java @@ -50,6 +50,10 @@ public class HasherTests extends ESTestCase { testHasherSelfGenerated(Hasher.SSHA256); } + public void testSHA256SelfGenerated() throws Exception { + testHasherSelfGenerated(Hasher.SHA256); + } + public void testNoopSelfGenerated() throws Exception { testHasherSelfGenerated(Hasher.NOOP); } From c72c76b5ea0dbdfe220de4ad0e623772b8de582f Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Mon, 20 May 2019 10:17:08 -0400 Subject: [PATCH 06/25] Update to joda time 2.10.2 (#42199) --- buildSrc/version.properties | 2 +- server/licenses/joda-time-2.10.1.jar.sha1 | 1 - server/licenses/joda-time-2.10.2.jar.sha1 | 1 + .../java/org/elasticsearch/common/time/DateUtilsTests.java | 3 ++- .../plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 | 1 - .../plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 | 1 + 6 files changed, 5 insertions(+), 4 deletions(-) delete mode 100644 server/licenses/joda-time-2.10.1.jar.sha1 create mode 100644 server/licenses/joda-time-2.10.2.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 21f4d8586e1..4447417e1dd 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -21,7 +21,7 @@ slf4j = 1.6.2 jna = 4.5.1 netty = 4.1.35.Final -joda = 2.10.1 +joda = 2.10.2 # when updating this version, you need to ensure compatibility with: # - plugins/ingest-attachment (transitive dependency, check the upstream POM) diff --git a/server/licenses/joda-time-2.10.1.jar.sha1 b/server/licenses/joda-time-2.10.1.jar.sha1 deleted file mode 100644 index 75e809754ec..00000000000 --- a/server/licenses/joda-time-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/server/licenses/joda-time-2.10.2.jar.sha1 b/server/licenses/joda-time-2.10.2.jar.sha1 new file mode 100644 index 00000000000..9cbac57161c --- /dev/null +++ b/server/licenses/joda-time-2.10.2.jar.sha1 @@ -0,0 +1 @@ +a079fc39ccc3de02acdeb7117443e5d9bd431687 \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java b/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java index 2b125127f66..4ef095da049 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateUtilsTests.java @@ -45,7 +45,8 @@ import static org.hamcrest.Matchers.is; public class DateUtilsTests extends ESTestCase { private static final Set IGNORE = new HashSet<>(Arrays.asList( - "Eire", "Europe/Dublin" // dublin timezone in joda does not account for DST + "Eire", "Europe/Dublin", // dublin timezone in joda does not account for DST + "Asia/Qostanay" // this has been added in joda 2.10.2 but is not part of the JDK 12.0.1 tzdata yet )); public void testTimezoneIds() { diff --git a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 deleted file mode 100644 index 75e809754ec..00000000000 --- a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9ac3dbf89dbf2ee385185dd0cd3064fe789efee0 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 new file mode 100644 index 00000000000..9cbac57161c --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/joda-time-2.10.2.jar.sha1 @@ -0,0 +1 @@ +a079fc39ccc3de02acdeb7117443e5d9bd431687 \ No newline at end of file From 6ae6f57d39f473e4968700a28a582b93fe3a3bf4 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 20 May 2019 12:07:29 -0400 Subject: [PATCH 07/25] [7.x Backport] Force selection of calendar or fixed intervals (#41906) The date_histogram accepts an interval which can be either a calendar interval (DST-aware, leap seconds, arbitrary length of months, etc) or fixed interval (strict multiples of SI units). Unfortunately this is inferred by first trying to parse as a calendar interval, then falling back to fixed if that fails. This leads to confusing arrangement where `1d` == calendar, but `2d` == fixed. And if you want a day of fixed time, you have to specify `24h` (e.g. the next smallest unit). This arrangement is very error-prone for users. This PR adds `calendar_interval` and `fixed_interval` parameters to any code that uses intervals (date_histogram, rollup, composite, datafeed, etc). Calendar only accepts calendar intervals, fixed accepts any combination of units (meaning `1d` can be used to specify `24h` in fixed time), and both are mutually exclusive. The old interval behavior is deprecated and will throw a deprecation warning. It is also mutually exclusive with the two new parameters. In the future the old dual-purpose interval will be removed. The change applies to both REST and java clients. --- .../job/config/DateHistogramGroupConfig.java | 139 +++- .../org/elasticsearch/client/RollupIT.java | 12 +- .../documentation/RollupDocumentationIT.java | 14 +- .../ml/datafeed/DatafeedConfigTests.java | 3 +- .../rollup/GetRollupJobResponseTests.java | 2 +- .../rollup/PutRollupJobRequestTests.java | 2 +- .../config/DateHistogramGroupConfigTests.java | 16 +- docs/build.gradle | 6 +- .../bucket/datehistogram-aggregation.asciidoc | 4 +- docs/java-api/aggs.asciidoc | 2 +- docs/java-api/search.asciidoc | 2 +- .../high-level/rollup/put_job.asciidoc | 2 +- .../bucket/composite-aggregation.asciidoc | 16 +- .../bucket/datehistogram-aggregation.asciidoc | 340 +++++--- docs/reference/aggregations/misc.asciidoc | 2 +- docs/reference/aggregations/pipeline.asciidoc | 8 +- .../pipeline/avg-bucket-aggregation.asciidoc | 2 +- .../bucket-script-aggregation.asciidoc | 2 +- .../bucket-selector-aggregation.asciidoc | 2 +- .../pipeline/bucket-sort-aggregation.asciidoc | 4 +- .../cumulative-sum-aggregation.asciidoc | 2 +- .../pipeline/derivative-aggregation.asciidoc | 6 +- ...extended-stats-bucket-aggregation.asciidoc | 2 +- .../pipeline/max-bucket-aggregation.asciidoc | 2 +- .../pipeline/min-bucket-aggregation.asciidoc | 2 +- .../pipeline/movavg-aggregation.asciidoc | 18 +- .../pipeline/movfn-aggregation.asciidoc | 22 +- .../percentiles-bucket-aggregation.asciidoc | 2 +- .../pipeline/serial-diff-aggregation.asciidoc | 2 +- .../stats-bucket-aggregation.asciidoc | 2 +- .../pipeline/sum-bucket-aggregation.asciidoc | 2 +- docs/reference/ml/aggregations.asciidoc | 4 +- docs/reference/rollup/apis/get-job.asciidoc | 8 +- docs/reference/rollup/apis/put-job.asciidoc | 2 +- .../rollup/apis/rollup-caps.asciidoc | 4 +- .../rollup/apis/rollup-index-caps.asciidoc | 4 +- .../rollup/apis/rollup-job-config.asciidoc | 4 +- .../rollup/apis/rollup-search.asciidoc | 2 +- .../rollup/rollup-getting-started.asciidoc | 4 +- .../rollup/understanding-groups.asciidoc | 8 +- .../test/painless/70_mov_fn_agg.yml | 18 +- .../org/elasticsearch/search/CCSDuelIT.java | 4 +- .../test/search.aggregation/10_histogram.yml | 5 +- .../test/search.aggregation/230_composite.yml | 74 +- .../search.aggregation/240_max_buckets.yml | 34 +- .../test/search.aggregation/250_moving_fn.yml | 36 +- .../test/search.aggregation/80_typed_keys.yml | 15 +- .../test/search/240_date_nanos.yml | 5 +- .../DateHistogramValuesSourceBuilder.java | 148 ++-- .../DateHistogramAggregationBuilder.java | 180 +++-- .../histogram/DateHistogramInterval.java | 20 + .../histogram/DateIntervalConsumer.java | 40 + .../bucket/histogram/DateIntervalWrapper.java | 427 ++++++++++ .../AggregatorFactoriesTests.java | 4 +- .../search/aggregations/MissingValueIT.java | 4 +- .../aggregations/bucket/DateHistogramIT.java | 21 +- .../CompositeAggregationBuilderTests.java | 5 +- .../composite/CompositeAggregatorTests.java | 11 + ...egacyIntervalCompositeAggBuilderTests.java | 155 ++++ .../DateHistogramAggregatorTests.java | 740 +++++++++++++++++- .../bucket/histogram/DateHistogramTests.java | 25 +- .../histogram/DateIntervalWrapperTests.java | 127 +++ .../pipeline/AvgBucketAggregatorTests.java | 2 +- .../CumulativeSumAggregatorTests.java | 5 +- .../aggregations/pipeline/MovFnUnitTests.java | 2 +- .../ml/datafeed/extractor/ExtractorUtils.java | 11 +- .../core/rollup/action/RollupJobCaps.java | 3 +- .../rollup/job/DateHistogramGroupConfig.java | 150 +++- .../xpack/core/rollup/job/GroupConfig.java | 2 +- .../GetDatafeedsActionResponseTests.java | 2 + .../core/ml/datafeed/DatafeedConfigTests.java | 21 +- .../core/ml/datafeed/DatafeedUpdateTests.java | 16 + .../extractor/ExtractorUtilsTests.java | 16 +- .../xpack/core/rollup/ConfigTestHelpers.java | 28 +- ...eHistogramGroupConfigSerializingTests.java | 84 +- .../integration/DataFramePivotRestIT.java | 14 +- .../integration/DataFrameRestTestCase.java | 12 +- .../transforms/pivot/PivotTests.java | 10 + .../ml/integration/DatafeedJobsRestIT.java | 24 +- .../TransportGetOverallBucketsAction.java | 3 +- .../DatafeedDelayedDataDetector.java | 4 +- .../RollupDataExtractorFactory.java | 11 +- .../extractor/DataExtractorFactoryTests.java | 13 +- .../rollup/RollupJobIdentifierUtils.java | 170 ++-- .../xpack/rollup/RollupRequestTranslator.java | 17 +- .../xpack/rollup/job/RollupIndexer.java | 8 +- .../rollup/RollupJobIdentifierUtilTests.java | 186 ++--- .../rollup/RollupRequestTranslationTests.java | 69 +- .../RollupResponseTranslationTests.java | 31 +- .../rollup/action/SearchActionTests.java | 50 +- .../xpack/rollup/config/ConfigTests.java | 17 +- .../xpack/rollup/job/IndexerUtilsTests.java | 10 +- .../job/RollupIndexerIndexingTests.java | 14 +- .../test/data_frame/preview_transforms.yml | 5 + .../rest-api-spec/test/ml/datafeeds_crud.yml | 2 +- .../rest-api-spec/test/rollup/delete_job.yml | 8 +- .../rest-api-spec/test/rollup/get_jobs.yml | 12 +- .../test/rollup/get_rollup_caps.yml | 20 +- .../test/rollup/get_rollup_index_caps.yml | 40 +- .../rest-api-spec/test/rollup/put_job.yml | 16 +- .../test/rollup/rollup_search.yml | 71 +- .../rest-api-spec/test/rollup/start_job.yml | 2 +- .../rest-api-spec/test/rollup/stop_job.yml | 2 +- .../xpack/restart/FullClusterRestartIT.java | 9 +- .../elasticsearch/multi_node/RollupIT.java | 4 +- x-pack/qa/rolling-upgrade/build.gradle | 5 + .../upgrades/RollupDateHistoUpgradeIT.java | 302 +++++++ .../mixed_cluster/40_ml_datafeed_crud.yml | 8 + .../test/old_cluster/40_ml_datafeed_crud.yml | 78 +- .../upgraded_cluster/40_ml_datafeed_crud.yml | 10 + 110 files changed, 3565 insertions(+), 814 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapperTests.java create mode 100644 x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfig.java index 21a610f7894..e56b5476685 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfig.java @@ -22,6 +22,7 @@ import org.elasticsearch.client.Validatable; import org.elasticsearch.client.ValidationException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -30,8 +31,11 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter import org.joda.time.DateTimeZone; import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; import java.util.Objects; import java.util.Optional; +import java.util.Set; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -59,14 +63,63 @@ public class DateHistogramGroupConfig implements Validatable, ToXContentObject { private static final String TIME_ZONE = "time_zone"; private static final String DELAY = "delay"; private static final String DEFAULT_TIMEZONE = "UTC"; + private static final String CALENDAR_INTERVAL = "calendar_interval"; + private static final String FIXED_INTERVAL = "fixed_interval"; + + // From DateHistogramAggregationBuilder in core, transplanted and modified to a set + // so we don't need to import a dependency on the class + private static final Set DATE_FIELD_UNITS; + static { + Set dateFieldUnits = new HashSet<>(); + dateFieldUnits.add("year"); + dateFieldUnits.add("1y"); + dateFieldUnits.add("quarter"); + dateFieldUnits.add("1q"); + dateFieldUnits.add("month"); + dateFieldUnits.add("1M"); + dateFieldUnits.add("week"); + dateFieldUnits.add("1w"); + dateFieldUnits.add("day"); + dateFieldUnits.add("1d"); + dateFieldUnits.add("hour"); + dateFieldUnits.add("1h"); + dateFieldUnits.add("minute"); + dateFieldUnits.add("1m"); + dateFieldUnits.add("second"); + dateFieldUnits.add("1s"); + DATE_FIELD_UNITS = Collections.unmodifiableSet(dateFieldUnits); + } private static final ConstructingObjectParser PARSER; static { - PARSER = new ConstructingObjectParser<>(NAME, true, a -> - new DateHistogramGroupConfig((String) a[0], (DateHistogramInterval) a[1], (DateHistogramInterval) a[2], (String) a[3])); + PARSER = new ConstructingObjectParser<>(NAME, true, a -> { + DateHistogramInterval oldInterval = (DateHistogramInterval) a[1]; + DateHistogramInterval calendarInterval = (DateHistogramInterval) a[2]; + DateHistogramInterval fixedInterval = (DateHistogramInterval) a[3]; + + if (oldInterval != null) { + if (calendarInterval != null || fixedInterval != null) { + throw new IllegalArgumentException("Cannot use [interval] with [fixed_interval] or [calendar_interval] " + + "configuration options."); + } + return new DateHistogramGroupConfig((String) a[0], oldInterval, (DateHistogramInterval) a[4], (String) a[5]); + } else if (calendarInterval != null && fixedInterval == null) { + return new CalendarInterval((String) a[0], calendarInterval, (DateHistogramInterval) a[4], (String) a[5]); + } else if (calendarInterval == null && fixedInterval != null) { + return new FixedInterval((String) a[0], fixedInterval, (DateHistogramInterval) a[4], (String) a[5]); + } else if (calendarInterval != null && fixedInterval != null) { + throw new IllegalArgumentException("Cannot set both [fixed_interval] and [calendar_interval] at the same time"); + } else { + throw new IllegalArgumentException("An interval is required. Use [fixed_interval] or [calendar_interval]."); + } + }); PARSER.declareString(constructorArg(), new ParseField(FIELD)); - PARSER.declareField(constructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(INTERVAL), ValueType.STRING); - PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(DELAY), ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(INTERVAL), ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), + new ParseField(CALENDAR_INTERVAL), ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), + new ParseField(FIXED_INTERVAL), ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(DELAY), ValueType.STRING); PARSER.declareString(optionalConstructorArg(), new ParseField(TIME_ZONE)); } @@ -76,8 +129,57 @@ public class DateHistogramGroupConfig implements Validatable, ToXContentObject { private final String timeZone; /** - * Create a new {@link DateHistogramGroupConfig} using the given field and interval parameters. + * FixedInterval is a {@link DateHistogramGroupConfig} that uses a fixed time interval for rolling up data. + * The fixed time interval is one or multiples of SI units and has no calendar-awareness (e.g. doesn't account + * for leap corrections, does not have variable length months, etc). + * + * For calendar-aware rollups, use {@link CalendarInterval} */ + public static class FixedInterval extends DateHistogramGroupConfig { + public FixedInterval(String field, DateHistogramInterval interval) { + this(field, interval, null, null); + } + + public FixedInterval(String field, DateHistogramInterval interval, DateHistogramInterval delay, String timeZone) { + super(field, interval, delay, timeZone); + // validate fixed time + TimeValue.parseTimeValue(interval.toString(), NAME + ".FixedInterval"); + } + } + + /** + * CalendarInterval is a {@link DateHistogramGroupConfig} that uses calendar-aware intervals for rolling up data. + * Calendar time intervals understand leap corrections and contextual differences in certain calendar units (e.g. + * months are variable length depending on the month). Calendar units are only available in singular quantities: + * 1s, 1m, 1h, 1d, 1w, 1q, 1M, 1y + * + * For fixed time rollups, use {@link FixedInterval} + */ + public static class CalendarInterval extends DateHistogramGroupConfig { + public CalendarInterval(String field, DateHistogramInterval interval) { + this(field, interval, null, null); + + } + + public CalendarInterval(String field, DateHistogramInterval interval, DateHistogramInterval delay, String timeZone) { + super(field, interval, delay, timeZone); + if (DATE_FIELD_UNITS.contains(interval.toString()) == false) { + throw new IllegalArgumentException("The supplied interval [" + interval +"] could not be parsed " + + "as a calendar interval."); + } + } + + } + + /** + * Create a new {@link DateHistogramGroupConfig} using the given field and interval parameters. + * + * @deprecated Build a DateHistoConfig using {@link DateHistogramGroupConfig.CalendarInterval} + * or {@link DateHistogramGroupConfig.FixedInterval} instead + * + * @since 7.2.0 + */ + @Deprecated public DateHistogramGroupConfig(final String field, final DateHistogramInterval interval) { this(field, interval, null, null); } @@ -85,17 +187,22 @@ public class DateHistogramGroupConfig implements Validatable, ToXContentObject { /** * Create a new {@link DateHistogramGroupConfig} using the given configuration parameters. *

- * The {@code field} and {@code interval} are required to compute the date histogram for the rolled up documents. - * The {@code delay} is optional and can be set to {@code null}. It defines how long to wait before rolling up new documents. - * The {@code timeZone} is optional and can be set to {@code null}. When configured, the time zone value is resolved using - * ({@link DateTimeZone#forID(String)} and must match a time zone identifier provided by the Joda Time library. + * The {@code field} and {@code interval} are required to compute the date histogram for the rolled up documents. + * The {@code delay} is optional and can be set to {@code null}. It defines how long to wait before rolling up new documents. + * The {@code timeZone} is optional and can be set to {@code null}. When configured, the time zone value is resolved using + * ({@link DateTimeZone#forID(String)} and must match a time zone identifier provided by the Joda Time library. *

- * - * @param field the name of the date field to use for the date histogram (required) + * @param field the name of the date field to use for the date histogram (required) * @param interval the interval to use for the date histogram (required) - * @param delay the time delay (optional) + * @param delay the time delay (optional) * @param timeZone the id of time zone to use to calculate the date histogram (optional). When {@code null}, the UTC timezone is used. + * + * @deprecated Build a DateHistoConfig using {@link DateHistogramGroupConfig.CalendarInterval} + * or {@link DateHistogramGroupConfig.FixedInterval} instead + * + * @since 7.2.0 */ + @Deprecated public DateHistogramGroupConfig(final String field, final DateHistogramInterval interval, final @Nullable DateHistogramInterval delay, @@ -153,7 +260,13 @@ public class DateHistogramGroupConfig implements Validatable, ToXContentObject { public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); { - builder.field(INTERVAL, interval.toString()); + if (this.getClass().equals(CalendarInterval.class)) { + builder.field(CALENDAR_INTERVAL, interval.toString()); + } else if (this.getClass().equals(FixedInterval.class)) { + builder.field(FIXED_INTERVAL, interval.toString()); + } else { + builder.field(INTERVAL, interval.toString()); + } builder.field(FIELD, field); if (delay != null) { builder.field(DELAY, delay.toString()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java index d876ce6ed5f..db77d76b793 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -152,7 +152,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase { public void testDeleteRollupJob() throws Exception { - final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); + final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("date", DateHistogramInterval.DAY)); final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600)); PutRollupJobRequest putRollupJobRequest = @@ -174,7 +174,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase { public void testPutStartAndGetRollupJob() throws Exception { // TODO expand this to also test with histogram and terms? - final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); + final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("date", DateHistogramInterval.DAY)); final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600)); @@ -334,7 +334,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase { final String cron = "*/1 * * * * ?"; final int pageSize = randomIntBetween(numDocs, numDocs * 10); // TODO expand this to also test with histogram and terms? - final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); + final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("date", DateHistogramInterval.DAY)); final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600)); @@ -378,7 +378,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase { case "delay": assertThat(entry.getValue(), equalTo("foo")); break; - case "interval": + case "calendar_interval": assertThat(entry.getValue(), equalTo("1d")); break; case "time_zone": @@ -446,7 +446,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase { final String cron = "*/1 * * * * ?"; final int pageSize = randomIntBetween(numDocs, numDocs * 10); // TODO expand this to also test with histogram and terms? - final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); + final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("date", DateHistogramInterval.DAY)); final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600)); @@ -490,7 +490,7 @@ public class RollupIT extends ESRestHighLevelClientTestCase { case "delay": assertThat(entry.getValue(), equalTo("foo")); break; - case "interval": + case "calendar_interval": assertThat(entry.getValue(), equalTo("1d")); break; case "time_zone": diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java index 8125c2f41f4..2a1c98f0c35 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java @@ -399,8 +399,8 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase { public void testGetRollupCaps() throws Exception { RestHighLevelClient client = highLevelClient(); - DateHistogramGroupConfig dateHistogram = - new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1> + DateHistogramGroupConfig dateHistogram = new DateHistogramGroupConfig.FixedInterval( + "timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1> TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter"); HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out"); GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms); @@ -473,7 +473,8 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase { // item represents a different aggregation that can be run against the "timestamp" // field, and any additional details specific to that agg (interval, etc) List> timestampCaps = fieldCaps.get("timestamp").getAggs(); - assert timestampCaps.get(0).toString().equals("{agg=date_histogram, delay=7d, interval=1h, time_zone=UTC}"); + logger.error(timestampCaps.get(0).toString()); + assert timestampCaps.get(0).toString().equals("{agg=date_histogram, fixed_interval=1h, delay=7d, time_zone=UTC}"); // In contrast to the timestamp field, the temperature field has multiple aggs configured List> temperatureCaps = fieldCaps.get("temperature").getAggs(); @@ -515,8 +516,8 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase { public void testGetRollupIndexCaps() throws Exception { RestHighLevelClient client = highLevelClient(); - DateHistogramGroupConfig dateHistogram = - new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1> + DateHistogramGroupConfig dateHistogram = new DateHistogramGroupConfig.FixedInterval( + "timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1> TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter"); HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out"); GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms); @@ -587,7 +588,8 @@ public class RollupDocumentationIT extends ESRestHighLevelClientTestCase { // item represents a different aggregation that can be run against the "timestamp" // field, and any additional details specific to that agg (interval, etc) List> timestampCaps = fieldCaps.get("timestamp").getAggs(); - assert timestampCaps.get(0).toString().equals("{agg=date_histogram, delay=7d, interval=1h, time_zone=UTC}"); + logger.error(timestampCaps.get(0).toString()); + assert timestampCaps.get(0).toString().equals("{agg=date_histogram, fixed_interval=1h, delay=7d, time_zone=UTC}"); // In contrast to the timestamp field, the temperature field has multiple aggs configured List> temperatureCaps = fieldCaps.get("temperature").getAggs(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java index 0b0ed52d0ff..a3b475193e4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField; import org.elasticsearch.test.AbstractXContentTestCase; @@ -79,7 +80,7 @@ public class DatafeedConfigTests extends AbstractXContentTestCase field.endsWith("status.current_position")) .test(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobRequestTests.java index a49f85a1fed..0056a7ad25c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobRequestTests.java @@ -49,7 +49,7 @@ public class PutRollupJobRequestTests extends AbstractXContentTestCase } } @@ -299,7 +299,7 @@ GET /_search "my_buckets": { "composite" : { "sources" : [ - { "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } }, + { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } }, { "product": { "terms": {"field": "product" } } } ] } @@ -324,7 +324,7 @@ GET /_search "sources" : [ { "shop": { "terms": {"field": "shop" } } }, { "product": { "terms": { "field": "product" } } }, - { "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } } + { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } } ] } } @@ -352,7 +352,7 @@ GET /_search "my_buckets": { "composite" : { "sources" : [ - { "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } }, + { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } }, { "product": { "terms": {"field": "product", "order": "asc" } } } ] } @@ -420,7 +420,7 @@ GET /_search "composite" : { "size": 2, "sources" : [ - { "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } }, + { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d" } } }, { "product": { "terms": {"field": "product" } } } ] } @@ -486,7 +486,7 @@ GET /_search "composite" : { "size": 2, "sources" : [ - { "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } }, + { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } }, { "product": { "terms": {"field": "product", "order": "asc" } } } ], "after": { "date": 1494288000000, "product": "mad max" } <1> @@ -515,7 +515,7 @@ GET /_search "my_buckets": { "composite" : { "sources" : [ - { "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "desc" } } }, + { "date": { "date_histogram": { "field": "timestamp", "calendar_interval": "1d", "order": "desc" } } }, { "product": { "terms": {"field": "product" } } } ] }, diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 07a6fd257ef..2ee9025b6de 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -10,102 +10,252 @@ that here the interval can be specified using date/time expressions. Time-based data requires special support because time-based intervals are not always a fixed length. -==== Setting intervals +==== Calendar and Fixed intervals -There seems to be no limit to the creativity we humans apply to setting our -clocks and calendars. We've invented leap years and leap seconds, standard and -daylight savings times, and timezone offsets of 30 or 45 minutes rather than a -full hour. While these creations help keep us in sync with the cosmos and our -environment, they can make specifying time intervals accurately a real challenge. -The only universal truth our researchers have yet to disprove is that a -millisecond is always the same duration, and a second is always 1000 milliseconds. -Beyond that, things get complicated. +When configuring a date histogram aggregation, the interval can be specified +in two manners: calendar-aware time intervals, and fixed time intervals. -Generally speaking, when you specify a single time unit, such as 1 hour or 1 day, you -are working with a _calendar interval_, but multiples, such as 6 hours or 3 days, are -_fixed-length intervals_. +Calendar-aware intervals understand that daylight savings changes the length +of specific days, months have different amounts of days, and leap seconds can +be tacked onto a particular year. -For example, a specification of 1 day (1d) from now is a calendar interval that -means "at -this exact time tomorrow" no matter the length of the day. A change to or from -daylight savings time that results in a 23 or 25 hour day is compensated for and the -specification of "this exact time tomorrow" is maintained. But if you specify 2 or -more days, each day must be of the same fixed duration (24 hours). In this case, if -the specified interval includes the change to or from daylight savings time, the -interval will end an hour sooner or later than you expect. +Fixed intervals are, by contrast, always multiples of SI units and do not change +based on calendaring context. -There are similar differences to consider when you specify single versus multiple -minutes or hours. Multiple time periods longer than a day are not supported. +[NOTE] +.Combined `interval` field is deprecated +================================== +deprecated[7.2, `interval` field is deprecated] Historically both calendar and fixed +intervals were configured in a single `interval` field, which led to confusing +semantics. Specifying `1d` would be assumed as a calendar-aware time, +whereas `2d` would be interpreted as fixed time. To get "one day" of fixed time, +the user would need to specify the next smaller unit (in this case, `24h`). -Here are the valid time specifications and their meanings: +This combined behavior was often unknown to users, and even when knowledgeable about +the behavior it was difficult to use and confusing. + +This behavior has been deprecated in favor of two new, explicit fields: `calendar_interval` +and `fixed_interval`. + +By forcing a choice between calendar and intervals up front, the semantics of the interval +are clear to the user immediately and there is no ambiguity. The old `interval` field +will be removed in the future. +================================== + +===== Calendar Intervals + +Calendar-aware intervals are configured with the `calendar_interval` parameter. +Calendar intervals can only be specified in "singular" quantities of the unit +(`1d`, `1M`, etc). Multiples, such as `2d`, are not supported and will throw an exception. + +The accepted units for calendar intervals are: + +minute (`m`, `1m`) :: +All minutes begin at 00 seconds. + +One minute is the interval between 00 seconds of the first minute and 00 +seconds of the following minute in the specified timezone, compensating for any +intervening leap seconds, so that the number of minutes and seconds past the +hour is the same at the start and end. + +hours (`h`, `1h`) :: +All hours begin at 00 minutes and 00 seconds. + +One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00 +minutes of the following hour in the specified timezone, compensating for any +intervening leap seconds, so that the number of minutes and seconds past the hour +is the same at the start and end. + + +days (`d`, `1d`) :: +All days begin at the earliest possible time, which is usually 00:00:00 +(midnight). + +One day (1d) is the interval between the start of the day and the start of +of the following day in the specified timezone, compensating for any intervening +time changes. + +week (`w`, `1w`) :: + +One week is the interval between the start day_of_week:hour:minute:second +and the same day of the week and time of the following week in the specified +timezone. + +month (`M`, `1M`) :: + +One month is the interval between the start day of the month and time of +day and the same day of the month and time of the following month in the specified +timezone, so that the day of the month and time of day are the same at the start +and end. + +quarter (`q`, `1q`) :: + +One quarter (1q) is the interval between the start day of the month and +time of day and the same day of the month and time of day three months later, +so that the day of the month and time of day are the same at the start and end. + + +year (`y`, `1y`) :: + +One year (1y) is the interval between the start day of the month and time of +day and the same day of the month and time of day the following year in the +specified timezone, so that the date and time are the same at the start and end. + + +===== Calendar Interval Examples +As an example, here is an aggregation requesting bucket intervals of a month in calendar time: + +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sales_over_time" : { + "date_histogram" : { + "field" : "date", + "calendar_interval" : "month" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +If you attempt to use multiples of calendar units, the aggregation will fail because only +singular calendar units are supported: + +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sales_over_time" : { + "date_histogram" : { + "field" : "date", + "calendar_interval" : "2d" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] +// TEST[catch:bad_request] + +[source,js] +-------------------------------------------------- +{ + "error" : { + "root_cause" : [...], + "type" : "x_content_parse_exception", + "reason" : "[1:82] [date_histogram] failed to parse field [calendar_interval]", + "caused_by" : { + "type" : "illegal_argument_exception", + "reason" : "The supplied interval [2d] could not be parsed as a calendar interval.", + "stack_trace" : "java.lang.IllegalArgumentException: The supplied interval [2d] could not be parsed as a calendar interval." + } + } +} + +-------------------------------------------------- +// NOTCONSOLE + +===== Fixed Intervals + +Fixed intervals are configured with the `fixed_interval` parameter. + +In contrast to calendar-aware intervals, fixed intervals are a fixed number of SI +units and never deviate, regardless of where they fall on the calendar. One second +is always composed of 1000ms. This allows fixed intervals to be specified in +any multiple of the supported units. + +However, it means fixed intervals cannot express other units such as months, +since the duration of a month is not a fixed quantity. Attempting to specify +a calendar interval like month or quarter will throw an exception. + +The accepted units for fixed intervals are: milliseconds (ms) :: -Fixed length interval; supports multiples. seconds (s) :: -1000 milliseconds; fixed length interval (except for the last second of a -minute that contains a leap-second, which is 2000ms long); supports multiples. +Defined as 1000 milliseconds each minutes (m) :: All minutes begin at 00 seconds. -* One minute (1m) is the interval between 00 seconds of the first minute and 00 -seconds of the following minute in the specified timezone, compensating for any -intervening leap seconds, so that the number of minutes and seconds past the -hour is the same at the start and end. -* Multiple minutes (__n__m) are intervals of exactly 60x1000=60,000 milliseconds -each. +Defined as 60 seconds each (60,000 milliseconds) hours (h) :: All hours begin at 00 minutes and 00 seconds. - -* One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00 -minutes of the following hour in the specified timezone, compensating for any -intervening leap seconds, so that the number of minutes and seconds past the hour -is the same at the start and end. -* Multiple hours (__n__h) are intervals of exactly 60x60x1000=3,600,000 milliseconds -each. +Defined as 60 minutes each (3,600,000 milliseconds) days (d) :: All days begin at the earliest possible time, which is usually 00:00:00 (midnight). -* One day (1d) is the interval between the start of the day and the start of -of the following day in the specified timezone, compensating for any intervening -time changes. -* Multiple days (__n__d) are intervals of exactly 24x60x60x1000=86,400,000 -milliseconds each. +Defined as 24 hours (86,400,000 milliseconds) -weeks (w) :: +===== Fixed Interval Examples -* One week (1w) is the interval between the start day_of_week:hour:minute:second -and the same day of the week and time of the following week in the specified -timezone. -* Multiple weeks (__n__w) are not supported. +If we try to recreate the "month" `calendar_interval` from earlier, we can approximate that with +30 fixed days: -months (M) :: +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sales_over_time" : { + "date_histogram" : { + "field" : "date", + "fixed_interval" : "30d" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] -* One month (1M) is the interval between the start day of the month and time of -day and the same day of the month and time of the following month in the specified -timezone, so that the day of the month and time of day are the same at the start -and end. -* Multiple months (__n__M) are not supported. +But if we try to use a calendar unit that is not supported, such as weeks, we'll get an exception: -quarters (q) :: +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sales_over_time" : { + "date_histogram" : { + "field" : "date", + "fixed_interval" : "2w" + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] +// TEST[catch:bad_request] -* One quarter (1q) is the interval between the start day of the month and -time of day and the same day of the month and time of day three months later, -so that the day of the month and time of day are the same at the start and end. + -* Multiple quarters (__n__q) are not supported. +[source,js] +-------------------------------------------------- +{ + "error" : { + "root_cause" : [...], + "type" : "x_content_parse_exception", + "reason" : "[1:82] [date_histogram] failed to parse field [fixed_interval]", + "caused_by" : { + "type" : "illegal_argument_exception", + "reason" : "failed to parse setting [date_histogram.fixedInterval] with value [2w] as a time value: unit is missing or unrecognized", + "stack_trace" : "java.lang.IllegalArgumentException: failed to parse setting [date_histogram.fixedInterval] with value [2w] as a time value: unit is missing or unrecognized" + } + } +} -years (y) :: +-------------------------------------------------- +// NOTCONSOLE -* One year (1y) is the interval between the start day of the month and time of -day and the same day of the month and time of day the following year in the -specified timezone, so that the date and time are the same at the start and end. + -* Multiple years (__n__y) are not supported. +===== Notes -NOTE: In all cases, when the specified end time does not exist, the actual end time is the closest available time after the specified end. @@ -123,49 +273,11 @@ WARNING: To avoid unexpected results, all connected servers and clients must sync to a reliable network time service. -==== Examples +NOTE: fractional time values are not supported, but you can address this by +shifting to another time unit (e.g., `1.5h` could instead be specified as `90m`). -Requesting bucket intervals of a month. - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "sales_over_time" : { - "date_histogram" : { - "field" : "date", - "interval" : "month" - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -You can also specify time values using abbreviations supported by +NOTE: You can also specify time values using abbreviations supported by <> parsing. -Note that fractional time values are not supported, but you can address this by -shifting to another -time unit (e.g., `1.5h` could instead be specified as `90m`). - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "sales_over_time" : { - "date_histogram" : { - "field" : "date", - "interval" : "90m" - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] ===== Keys @@ -186,7 +298,7 @@ POST /sales/_search?size=0 "sales_over_time" : { "date_histogram" : { "field" : "date", - "interval" : "1M", + "calendar_interval" : "1M", "format" : "yyyy-MM-dd" <1> } } @@ -259,7 +371,7 @@ GET my_index/_search?size=0 "by_day": { "date_histogram": { "field": "date", - "interval": "day" + "calendar_interval": "day" } } } @@ -301,7 +413,7 @@ GET my_index/_search?size=0 "by_day": { "date_histogram": { "field": "date", - "interval": "day", + "calendar_interval": "day", "time_zone": "-01:00" } } @@ -380,7 +492,7 @@ GET my_index/_search?size=0 "by_day": { "date_histogram": { "field": "date", - "interval": "day", + "calendar_interval": "day", "offset": "+6h" } } @@ -432,7 +544,7 @@ POST /sales/_search?size=0 "sales_over_time" : { "date_histogram" : { "field" : "date", - "interval" : "1M", + "calendar_interval" : "1M", "format" : "yyyy-MM-dd", "keyed": true } @@ -502,7 +614,7 @@ POST /sales/_search?size=0 "sale_date" : { "date_histogram" : { "field" : "date", - "interval": "year", + "calendar_interval": "year", "missing": "2000/01/01" <1> } } @@ -522,8 +634,6 @@ control the order using the `order` setting. This setting supports the same `order` functionality as <>. -deprecated[6.0.0, Use `_key` instead of `_time` to order buckets by their dates/keys] - ===== Using a script to aggregate by day of the week When you need to aggregate the results by day of the week, use a script that diff --git a/docs/reference/aggregations/misc.asciidoc b/docs/reference/aggregations/misc.asciidoc index 288643dbf93..678ebc0a8a4 100644 --- a/docs/reference/aggregations/misc.asciidoc +++ b/docs/reference/aggregations/misc.asciidoc @@ -102,7 +102,7 @@ GET /twitter/_search?typed_keys "tweets_over_time": { "date_histogram": { "field": "date", - "interval": "year" + "calendar_interval": "year" }, "aggregations": { "top_users": { diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index 37c1c357007..81d711cc29c 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -57,7 +57,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"timestamp", - "interval":"day" + "calendar_interval":"day" }, "aggs":{ "the_sum":{ @@ -88,7 +88,7 @@ POST /_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { @@ -125,7 +125,7 @@ POST /_search "my_date_histo": { "date_histogram": { "field":"timestamp", - "interval":"day" + "calendar_interval":"day" }, "aggs": { "the_movavg": { @@ -153,7 +153,7 @@ POST /sales/_search "histo": { "date_histogram": { "field": "date", - "interval": "day" + "calendar_interval": "day" }, "aggs": { "categories": { diff --git a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc index ea150b4ab6d..33ccf505e5b 100644 --- a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc @@ -42,7 +42,7 @@ POST /_search "sales_per_month": { "date_histogram": { "field": "date", - "interval": "month" + "calendar_interval": "month" }, "aggs": { "sales": { diff --git a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc index 7dc99ba7719..6ecd1248a4e 100644 --- a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc @@ -50,7 +50,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "total_sales": { diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc index 7ec19174a06..41ce04803fb 100644 --- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc @@ -53,7 +53,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "total_sales": { diff --git a/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc index a136a6ee4d5..d219e005d75 100644 --- a/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-sort-aggregation.asciidoc @@ -56,7 +56,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "total_sales": { @@ -144,7 +144,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "bucket_truncate": { diff --git a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc index a6dff7fa846..226fd9c1bd1 100644 --- a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc @@ -40,7 +40,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { diff --git a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc index f40ace7432d..d987294f965 100644 --- a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc @@ -43,7 +43,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { @@ -137,7 +137,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { @@ -237,7 +237,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { diff --git a/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc index c35223885fc..8f9522ec3e5 100644 --- a/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/extended-stats-bucket-aggregation.asciidoc @@ -44,7 +44,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { diff --git a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc index 53a3aaa28f7..58bdab6128b 100644 --- a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc @@ -42,7 +42,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { diff --git a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc index 620cf02c714..8f075f7e071 100644 --- a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc @@ -42,7 +42,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index 06641391ced..7c80e4797ba 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -62,7 +62,7 @@ POST /_search "my_date_histo":{ <1> "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -165,7 +165,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -219,7 +219,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -279,7 +279,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -338,7 +338,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -427,7 +427,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -488,7 +488,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -538,7 +538,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -617,7 +617,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc index f6fb25c76f6..ea414237174 100644 --- a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc @@ -46,7 +46,7 @@ POST /_search "my_date_histo":{ <1> "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -148,7 +148,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -207,7 +207,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -250,7 +250,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -293,7 +293,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -338,7 +338,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -390,7 +390,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -436,7 +436,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -488,7 +488,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -546,7 +546,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ @@ -612,7 +612,7 @@ POST /_search "my_date_histo":{ "date_histogram":{ "field":"date", - "interval":"1M" + "calendar_interval":"1M" }, "aggs":{ "the_sum":{ diff --git a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc index 456a4046c06..6620e5689cb 100644 --- a/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/percentiles-bucket-aggregation.asciidoc @@ -43,7 +43,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { diff --git a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc index 1506e396858..7a414b95d40 100644 --- a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc @@ -69,7 +69,7 @@ POST /_search "my_date_histo": { <1> "date_histogram": { "field": "timestamp", - "interval": "day" + "calendar_interval": "day" }, "aggs": { "the_sum": { diff --git a/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc index 2c9f585ebea..8c6359fb776 100644 --- a/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/stats-bucket-aggregation.asciidoc @@ -41,7 +41,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { diff --git a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc index 83e0e321350..f0a19f32541 100644 --- a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc @@ -41,7 +41,7 @@ POST /sales/_search "sales_per_month" : { "date_histogram" : { "field" : "date", - "interval" : "month" + "calendar_interval" : "month" }, "aggs": { "sales": { diff --git a/docs/reference/ml/aggregations.asciidoc b/docs/reference/ml/aggregations.asciidoc index a50016807a7..1fad9f1b2bb 100644 --- a/docs/reference/ml/aggregations.asciidoc +++ b/docs/reference/ml/aggregations.asciidoc @@ -63,7 +63,7 @@ PUT _ml/datafeeds/datafeed-farequote "buckets": { "date_histogram": { "field": "time", - "interval": "360s", + "fixed_interval": "360s", "time_zone": "UTC" }, "aggregations": { @@ -119,7 +119,7 @@ pipeline aggregation to find the first order derivative of the counter "buckets": { "date_histogram": { "field": "@timestamp", - "interval": "5m" + "fixed_interval": "5m" }, "aggregations": { "@timestamp": { diff --git a/docs/reference/rollup/apis/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc index ff4d62fb800..4e39778eebd 100644 --- a/docs/reference/rollup/apis/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -63,7 +63,7 @@ Which will yield the following response: "cron" : "*/30 * * * * ?", "groups" : { "date_histogram" : { - "interval" : "1h", + "fixed_interval" : "1h", "delay": "7d", "field": "timestamp", "time_zone": "UTC" @@ -149,7 +149,7 @@ PUT _rollup/job/sensor2 <1> "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "fixed_interval": "1h", "delay": "7d" }, "terms": { @@ -189,7 +189,7 @@ Which will yield the following response: "cron" : "*/30 * * * * ?", "groups" : { "date_histogram" : { - "interval" : "1h", + "fixed_interval" : "1h", "delay": "7d", "field": "timestamp", "time_zone": "UTC" @@ -244,7 +244,7 @@ Which will yield the following response: "cron" : "*/30 * * * * ?", "groups" : { "date_histogram" : { - "interval" : "1h", + "fixed_interval" : "1h", "delay": "7d", "field": "timestamp", "time_zone": "UTC" diff --git a/docs/reference/rollup/apis/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc index b43c5a0e90b..eac71a48b43 100644 --- a/docs/reference/rollup/apis/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -68,7 +68,7 @@ PUT _rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "fixed_interval": "1h", "delay": "7d" }, "terms": { diff --git a/docs/reference/rollup/apis/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc index a0de0f99f98..e50806f3c1e 100644 --- a/docs/reference/rollup/apis/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -62,7 +62,7 @@ PUT _rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "fixed_interval": "1h", "delay": "7d" }, "terms": { @@ -125,7 +125,7 @@ Which will yield the following response: { "agg" : "date_histogram", "time_zone" : "UTC", - "interval" : "1h", + "fixed_interval" : "1h", "delay": "7d" } ], diff --git a/docs/reference/rollup/apis/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc index 1fad99e0311..a0697ba7032 100644 --- a/docs/reference/rollup/apis/rollup-index-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-index-caps.asciidoc @@ -53,7 +53,7 @@ PUT _rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "fixed_interval": "1h", "delay": "7d" }, "terms": { @@ -118,7 +118,7 @@ This will yield the following response: { "agg" : "date_histogram", "time_zone" : "UTC", - "interval" : "1h", + "fixed_interval" : "1h", "delay": "7d" } ], diff --git a/docs/reference/rollup/apis/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc index 852f7b879fb..8277834d5e4 100644 --- a/docs/reference/rollup/apis/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -24,7 +24,7 @@ PUT _rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "60m", + "fixed_interval": "60m", "delay": "7d" }, "terms": { @@ -100,7 +100,7 @@ fields will then be available later for aggregating into buckets. For example, "groups" : { "date_histogram": { "field": "timestamp", - "interval": "60m", + "fixed_interval": "60m", "delay": "7d" }, "terms": { diff --git a/docs/reference/rollup/apis/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc index 244f304ed91..ec2a554d09f 100644 --- a/docs/reference/rollup/apis/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -62,7 +62,7 @@ PUT _rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "fixed_interval": "1h", "delay": "7d" }, "terms": { diff --git a/docs/reference/rollup/rollup-getting-started.asciidoc b/docs/reference/rollup/rollup-getting-started.asciidoc index 71a8ed73cc8..3e2d694464e 100644 --- a/docs/reference/rollup/rollup-getting-started.asciidoc +++ b/docs/reference/rollup/rollup-getting-started.asciidoc @@ -39,7 +39,7 @@ PUT _rollup/job/sensor "groups" : { "date_histogram": { "field": "timestamp", - "interval": "60m" + "fixed_interval": "60m" }, "terms": { "fields": ["node"] @@ -194,7 +194,7 @@ GET /sensor_rollup/_rollup_search "timeline": { "date_histogram": { "field": "timestamp", - "interval": "7d" + "fixed_interval": "7d" }, "aggs": { "nodes": { diff --git a/docs/reference/rollup/understanding-groups.asciidoc b/docs/reference/rollup/understanding-groups.asciidoc index 4733467ec33..a59c19fbf5c 100644 --- a/docs/reference/rollup/understanding-groups.asciidoc +++ b/docs/reference/rollup/understanding-groups.asciidoc @@ -22,7 +22,7 @@ based on which groups are potentially useful to future queries. For example, th "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "fixed_interval": "1h", "delay": "7d" }, "terms": { @@ -47,7 +47,7 @@ Importantly, these aggs/fields can be used in any combination. This aggregation "hourly": { "date_histogram": { "field": "timestamp", - "interval": "1h" + "fixed_interval": "1h" }, "aggs": { "host_names": { @@ -69,7 +69,7 @@ is just as valid as this aggregation: "hourly": { "date_histogram": { "field": "timestamp", - "interval": "1h" + "fixed_interval": "1h" }, "aggs": { "data_center": { @@ -171,7 +171,7 @@ PUT _rollup/job/combined "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h", + "fixed_interval": "1h", "delay": "7d" }, "terms": { diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml index c2fb38611a3..cca143f0bcc 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml @@ -66,7 +66,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: @@ -98,7 +98,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: @@ -130,7 +130,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: @@ -162,7 +162,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: @@ -189,7 +189,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: @@ -216,7 +216,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: @@ -243,7 +243,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: @@ -270,7 +270,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: @@ -296,7 +296,7 @@ setup: the_histo: date_histogram: field: "date" - interval: "1d" + calendar_interval: "1d" aggs: the_avg: avg: diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 4a18ddbe1b6..558e6071255 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -564,7 +564,7 @@ public class CCSDuelIT extends ESRestTestCase { tags.showTermDocCountError(true); DateHistogramAggregationBuilder creation = new DateHistogramAggregationBuilder("creation"); creation.field("creationDate"); - creation.dateHistogramInterval(DateHistogramInterval.QUARTER); + creation.calendarInterval(DateHistogramInterval.QUARTER); creation.subAggregation(tags); sourceBuilder.aggregation(creation); duelSearch(searchRequest, CCSDuelIT::assertAggs); @@ -591,7 +591,7 @@ public class CCSDuelIT extends ESRestTestCase { sourceBuilder.size(0); DateHistogramAggregationBuilder daily = new DateHistogramAggregationBuilder("daily"); daily.field("creationDate"); - daily.dateHistogramInterval(DateHistogramInterval.DAY); + daily.calendarInterval(DateHistogramInterval.DAY); sourceBuilder.aggregation(daily); daily.subAggregation(new DerivativePipelineAggregationBuilder("derivative", "_count")); sourceBuilder.aggregation(new MaxBucketPipelineAggregationBuilder("biggest_day", "daily._count")); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml index f07ac96e67e..3c4dba98ab9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/10_histogram.yml @@ -143,7 +143,8 @@ setup: "Deprecated _time order": - skip: - reason: _time order deprecated in 6.0, replaced by _key + version: " - 7.1.99" + reason: _time order deprecated in 6.0, replaced by _key. Calendar_interval added in 7.2 features: "warnings" - do: @@ -176,7 +177,7 @@ setup: - do: search: rest_total_hits_as_int: true - body: { "aggs" : { "histo" : { "date_histogram" : { "field" : "date", "interval" : "month", "order" : { "_time" : "desc" } } } } } + body: { "aggs" : { "histo" : { "date_histogram" : { "field" : "date", "calendar_interval" : "month", "order" : { "_time" : "desc" } } } } } warnings: - "Deprecated aggregation order key [_time] used, replaced by [_key]" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml index 535e5565008..88fb807ba2e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/230_composite.yml @@ -264,8 +264,74 @@ setup: --- "Composite aggregation with format": - skip: - version: " - 6.2.99" - reason: this uses a new option (format) added in 6.3.0 + version: " - 7.1.99" + reason: calendar_interval introduced in 7.2.0 + features: warnings + + - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + sources: [ + { + "date": { + "date_histogram": { + "field": "date", + "interval": "1d", + "format": "yyyy-MM-dd" + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 2 } + - match: { aggregations.test.buckets.0.key.date: "2017-10-20" } + - match: { aggregations.test.buckets.0.doc_count: 1 } + - match: { aggregations.test.buckets.1.key.date: "2017-10-21" } + - match: { aggregations.test.buckets.1.doc_count: 1 } + + - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' + search: + rest_total_hits_as_int: true + index: test + body: + aggregations: + test: + composite: + after: { + date: "2017-10-20" + } + sources: [ + { + "date": { + "date_histogram": { + "field": "date", + "interval": "1d", + "format": "yyyy-MM-dd" + } + } + } + ] + + - match: {hits.total: 6} + - length: { aggregations.test.buckets: 1 } + - match: { aggregations.test.buckets.0.key.date: "2017-10-21" } + - match: { aggregations.test.buckets.0.doc_count: 1 } + +--- +"Composite aggregation with format and calendar_interval": + - skip: + version: " - 7.1.99" + reason: calendar_interval introduced in 7.2.0 - do: search: @@ -280,7 +346,7 @@ setup: "date": { "date_histogram": { "field": "date", - "interval": "1d", + "calendar_interval": "1d", "format": "yyyy-MM-dd" } } @@ -310,7 +376,7 @@ setup: "date": { "date_histogram": { "field": "date", - "interval": "1d", + "calendar_interval": "1d", "format": "yyyy-MM-dd" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml index 3dd8d345043..1b23eea01b7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml @@ -89,18 +89,25 @@ setup: catch: /.*Trying to create too many buckets.*/ search: rest_total_hits_as_int: true + allow_partial_search_results: false index: test body: aggregations: test: - date_histogram: - field: date - interval: 1d + terms: + field: keyword + + - do: + cluster.put_settings: + body: + transient: + search.max_buckets: 6 - do: catch: /.*Trying to create too many buckets.*/ search: rest_total_hits_as_int: true + allow_partial_search_results: false index: test body: aggregations: @@ -109,25 +116,6 @@ setup: field: keyword aggs: 2: - date_histogram: + terms: field: date - interval: 1d - - do: - cluster.put_settings: - body: - transient: - search.max_buckets: 100 - - - do: - catch: /.*Trying to create too many buckets.*/ - search: - rest_total_hits_as_int: true - index: test - body: - aggregations: - test: - date_histogram: - field: date - interval: 1d - min_doc_count: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml index 0a7affd276a..14e626b94e7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -6,8 +6,43 @@ setup: --- "Bad window": + - skip: + version: " - 7.1.99" + reason: "calendar_interval added in 7.2" + - do: catch: /\[window\] must be a positive, non-zero integer\./ + search: + rest_total_hits_as_int: true + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + calendar_interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: -1 + script: "MovingFunctions.windowMax(values)" + +--- +"Bad window deprecated interval": + + - skip: + version: " - 7.1.99" + reason: "interval deprecation added in 7.2" + features: "warnings" + + - do: + catch: /\[window\] must be a positive, non-zero integer\./ + warnings: + - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." search: rest_total_hits_as_int: true body: @@ -26,7 +61,6 @@ setup: buckets_path: "the_avg" window: -1 script: "MovingFunctions.windowMax(values)" - --- "Not under date_histo": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml index 7897d1feb5a..d0414325564 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml @@ -206,12 +206,9 @@ setup: --- "Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation": - skip: - features: warnings - version: " - 6.3.99" - reason: "deprecation added in 6.4.0" + version: " - 7.1.99" + reason: "calendar_interval added in 7.2" - do: - warnings: - - 'The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.' search: rest_total_hits_as_int: true typed_keys: true @@ -221,13 +218,13 @@ setup: test_created_histogram: date_histogram: field: created - interval: month + calendar_interval: month aggregations: test_sum: sum: field: num - test_moving_avg: - moving_avg: + test_deriv: + derivative: buckets_path: "test_sum" test_max_bucket: max_bucket: @@ -236,5 +233,5 @@ setup: - is_true: aggregations.date_histogram#test_created_histogram - is_true: aggregations.date_histogram#test_created_histogram.buckets.0.sum#test_sum - is_true: aggregations.date_histogram#test_created_histogram.buckets.1.sum#test_sum - - is_true: aggregations.date_histogram#test_created_histogram.buckets.1.simple_value#test_moving_avg + - is_true: aggregations.date_histogram#test_created_histogram.buckets.1.derivative#test_deriv - is_true: aggregations.bucket_metric_value#test_max_bucket diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml index f5345ba57b1..6aaece2f9c5 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/240_date_nanos.yml @@ -124,6 +124,9 @@ setup: --- "date histogram aggregation with date and date_nanos mapping": + - skip: + version: " - 7.1.99" + reason: calendar_interval introduced in 7.2.0 - do: bulk: @@ -148,7 +151,7 @@ setup: date: date_histogram: field: date - interval: 1d + calendar_interval: 1d - match: { hits.total: 4 } - length: { aggregations.date.buckets: 2 } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 53a7832884c..bb7632278de 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -19,12 +19,10 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -33,7 +31,8 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.DateIntervalConsumer; +import org.elasticsearch.search.aggregations.bucket.histogram.DateIntervalWrapper; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -44,32 +43,19 @@ import java.time.ZoneId; import java.time.ZoneOffset; import java.util.Objects; -import static org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder.DATE_FIELD_UNITS; - /** * A {@link CompositeValuesSourceBuilder} that builds a {@link RoundingValuesSource} from a {@link Script} or * a field name using the provided interval. */ -public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuilder { +public class DateHistogramValuesSourceBuilder + extends CompositeValuesSourceBuilder implements DateIntervalConsumer { static final String TYPE = "date_histogram"; private static final ObjectParser PARSER; static { PARSER = new ObjectParser<>(DateHistogramValuesSourceBuilder.TYPE); PARSER.declareString(DateHistogramValuesSourceBuilder::format, new ParseField("format")); - PARSER.declareField((histogram, interval) -> { - if (interval instanceof Long) { - histogram.interval((long) interval); - } else { - histogram.dateHistogramInterval((DateHistogramInterval) interval); - } - }, p -> { - if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return p.longValue(); - } else { - return new DateHistogramInterval(p.text()); - } - }, Histogram.INTERVAL_FIELD, ObjectParser.ValueType.LONG); + DateIntervalWrapper.declareIntervalFields(PARSER); PARSER.declareField(DateHistogramValuesSourceBuilder::timeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { return ZoneId.of(p.text()); @@ -83,9 +69,8 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild return PARSER.parse(parser, new DateHistogramValuesSourceBuilder(name), null); } - private long interval = 0; private ZoneId timeZone = null; - private DateHistogramInterval dateHistogramInterval; + private DateIntervalWrapper dateHistogramInterval = new DateIntervalWrapper(); public DateHistogramValuesSourceBuilder(String name) { super(name, ValueType.DATE); @@ -93,33 +78,19 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild protected DateHistogramValuesSourceBuilder(StreamInput in) throws IOException { super(in); - this.interval = in.readLong(); - this.dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); - if (in.getVersion().before(Version.V_7_0_0)) { - this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); - } else { - this.timeZone = in.readOptionalZoneId(); - } + dateHistogramInterval = new DateIntervalWrapper(in); + timeZone = in.readOptionalZoneId(); } @Override protected void innerWriteTo(StreamOutput out) throws IOException { - out.writeLong(interval); - out.writeOptionalWriteable(dateHistogramInterval); - if (out.getVersion().before(Version.V_7_0_0)) { - out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); - } else { - out.writeOptionalZoneId(timeZone); - } + dateHistogramInterval.writeTo(out); + out.writeOptionalZoneId(timeZone); } @Override protected void doXContentBody(XContentBuilder builder, Params params) throws IOException { - if (dateHistogramInterval == null) { - builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), interval); - } else { - builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString()); - } + dateHistogramInterval.toXContent(builder, params); if (timeZone != null) { builder.field("time_zone", timeZone.toString()); } @@ -127,13 +98,12 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild @Override protected int innerHashCode() { - return Objects.hash(interval, dateHistogramInterval, timeZone); + return Objects.hash(dateHistogramInterval, timeZone); } @Override protected boolean innerEquals(DateHistogramValuesSourceBuilder other) { - return Objects.equals(interval, other.interval) - && Objects.equals(dateHistogramInterval, other.dateHistogramInterval) + return Objects.equals(dateHistogramInterval, other.dateHistogramInterval) && Objects.equals(timeZone, other.timeZone); } @@ -145,38 +115,84 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild /** * Returns the interval in milliseconds that is set on this source **/ + @Deprecated public long interval() { - return interval; + return dateHistogramInterval.interval(); } /** * Sets the interval on this source. * If both {@link #interval()} and {@link #dateHistogramInterval()} are set, * then the {@link #dateHistogramInterval()} wins. + * + * @deprecated Use {@link #calendarInterval(DateHistogramInterval)} or {@link #fixedInterval(DateHistogramInterval)} instead + * @since 7.2.0 **/ + @Deprecated public DateHistogramValuesSourceBuilder interval(long interval) { - if (interval < 1) { - throw new IllegalArgumentException("[interval] must be 1 or greater for [date_histogram] source"); - } - this.interval = interval; + dateHistogramInterval.interval(interval); return this; } /** * Returns the date interval that is set on this source **/ + @Deprecated public DateHistogramInterval dateHistogramInterval() { - return dateHistogramInterval; + return dateHistogramInterval.dateHistogramInterval(); } - public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInterval dateHistogramInterval) { - if (dateHistogramInterval == null) { - throw new IllegalArgumentException("[dateHistogramInterval] must not be null"); - } - this.dateHistogramInterval = dateHistogramInterval; + /** + * @deprecated Use {@link #calendarInterval(DateHistogramInterval)} or {@link #fixedInterval(DateHistogramInterval)} instead + * @since 7.2.0 + */ + @Deprecated + public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInterval interval) { + dateHistogramInterval.dateHistogramInterval(interval); return this; } + /** + * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units + * are calendar-aware, meaning they respect leap additions, variable days per month, etc. + * + * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#fixedInterval(DateHistogramInterval)} + * + * @param interval The calendar interval to use with the aggregation + */ + public DateHistogramValuesSourceBuilder calendarInterval(DateHistogramInterval interval) { + dateHistogramInterval.calendarInterval(interval); + return this; + } + + /** + * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are + * not calendar aware and are simply multiples of fixed, SI units. + * + * This is mutually exclusive with {@link DateHistogramValuesSourceBuilder#calendarInterval(DateHistogramInterval)} + * + * @param interval The fixed interval to use with the aggregation + */ + public DateHistogramValuesSourceBuilder fixedInterval(DateHistogramInterval interval) { + dateHistogramInterval.fixedInterval(interval); + return this; + } + + /** Return the interval as a date time unit if applicable, regardless of how it was configured. If this returns + * {@code null} then it means that the interval is expressed as a fixed + * {@link TimeValue} and may be accessed via {@link #getIntervalAsFixed()} ()}. */ + public DateHistogramInterval getIntervalAsCalendar() { + return dateHistogramInterval.getAsCalendarInterval(); + } + + /** + * Get the interval as a {@link TimeValue}, regardless of how it was configured. Returns null if + * the interval cannot be parsed as a fixed time. + */ + public DateHistogramInterval getIntervalAsFixed() { + return dateHistogramInterval.getAsFixedInterval(); + } + /** * Sets the time zone to use for this aggregation */ @@ -195,31 +211,9 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild return timeZone; } - private Rounding createRounding() { - Rounding.Builder tzRoundingBuilder; - if (dateHistogramInterval != null) { - Rounding.DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); - if (dateTimeUnit != null) { - tzRoundingBuilder = Rounding.builder(dateTimeUnit); - } else { - // the interval is a time value? - tzRoundingBuilder = Rounding.builder( - TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval")); - } - } else { - // the interval is an integer time value in millis? - tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval)); - } - if (timeZone() != null) { - tzRoundingBuilder.timeZone(timeZone()); - } - Rounding rounding = tzRoundingBuilder.build(); - return rounding; - } - @Override protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig config) throws IOException { - Rounding rounding = createRounding(); + Rounding rounding = dateHistogramInterval.createRounding(timeZone()); ValuesSource orig = config.toValuesSource(context.getQueryShardContext()); if (orig == null) { orig = ValuesSource.Numeric.EMPTY; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 6d7852a8644..52aebd43c5d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -70,7 +70,7 @@ import static java.util.Collections.unmodifiableMap; * A builder for histograms on date fields. */ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder - implements MultiBucketAggregationBuilder { + implements MultiBucketAggregationBuilder, DateIntervalConsumer { public static final String NAME = "date_histogram"; private static DateMathParser EPOCH_MILLIS_PARSER = DateFormatter.forPattern("epoch_millis").toDateMathParser(); @@ -103,19 +103,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil PARSER = new ObjectParser<>(DateHistogramAggregationBuilder.NAME); ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, true); - PARSER.declareField((histogram, interval) -> { - if (interval instanceof Long) { - histogram.interval((long) interval); - } else { - histogram.dateHistogramInterval((DateHistogramInterval) interval); - } - }, p -> { - if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { - return p.longValue(); - } else { - return new DateHistogramInterval(p.text()); - } - }, Histogram.INTERVAL_FIELD, ObjectParser.ValueType.LONG); + DateIntervalWrapper.declareIntervalFields(PARSER); PARSER.declareField(DateHistogramAggregationBuilder::offset, p -> { if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { @@ -140,8 +128,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil return PARSER.parse(parser, new DateHistogramAggregationBuilder(aggregationName), null); } - private long interval; - private DateHistogramInterval dateHistogramInterval; + private DateIntervalWrapper dateHistogramInterval = new DateIntervalWrapper(); private long offset = 0; private ExtendedBounds extendedBounds; private BucketOrder order = BucketOrder.key(true); @@ -156,7 +143,6 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil protected DateHistogramAggregationBuilder(DateHistogramAggregationBuilder clone, Builder factoriesBuilder, Map metaData) { super(clone, factoriesBuilder, metaData); - this.interval = clone.interval; this.dateHistogramInterval = clone.dateHistogramInterval; this.offset = clone.offset; this.extendedBounds = clone.extendedBounds; @@ -176,8 +162,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil order = InternalOrder.Streams.readHistogramOrder(in, true); keyed = in.readBoolean(); minDocCount = in.readVLong(); - interval = in.readLong(); - dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); + dateHistogramInterval = new DateIntervalWrapper(in); offset = in.readLong(); extendedBounds = in.readOptionalWriteable(ExtendedBounds::new); } @@ -187,44 +172,97 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil InternalOrder.Streams.writeHistogramOrder(order, out, true); out.writeBoolean(keyed); out.writeVLong(minDocCount); - out.writeLong(interval); - out.writeOptionalWriteable(dateHistogramInterval); + dateHistogramInterval.writeTo(out); out.writeLong(offset); out.writeOptionalWriteable(extendedBounds); } /** Get the current interval in milliseconds that is set on this builder. */ + @Deprecated public long interval() { - return interval; + return dateHistogramInterval.interval(); } /** Set the interval on this builder, and return the builder so that calls can be chained. * If both {@link #interval()} and {@link #dateHistogramInterval()} are set, then the - * {@link #dateHistogramInterval()} wins. */ + * {@link #dateHistogramInterval()} wins. + * + * @deprecated use {@link #fixedInterval(DateHistogramInterval)} or {@link #calendarInterval(DateHistogramInterval)} instead + * @since 7.2.0 + */ + @Deprecated public DateHistogramAggregationBuilder interval(long interval) { - if (interval < 1) { - throw new IllegalArgumentException("[interval] must be 1 or greater for histogram aggregation [" + name + "]"); - } - this.interval = interval; + dateHistogramInterval.interval(interval); return this; } /** Get the current date interval that is set on this builder. */ + @Deprecated public DateHistogramInterval dateHistogramInterval() { - return dateHistogramInterval; + return dateHistogramInterval.dateHistogramInterval(); } /** Set the interval on this builder, and return the builder so that calls can be chained. * If both {@link #interval()} and {@link #dateHistogramInterval()} are set, then the - * {@link #dateHistogramInterval()} wins. */ - public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterval dateHistogramInterval) { - if (dateHistogramInterval == null) { - throw new IllegalArgumentException("[dateHistogramInterval] must not be null: [" + name + "]"); - } - this.dateHistogramInterval = dateHistogramInterval; + * {@link #dateHistogramInterval()} wins. + * + * @deprecated use {@link #fixedInterval(DateHistogramInterval)} or {@link #calendarInterval(DateHistogramInterval)} instead + * @since 7.2.0 + */ + @Deprecated + public DateHistogramAggregationBuilder dateHistogramInterval(DateHistogramInterval interval) { + dateHistogramInterval.dateHistogramInterval(interval); return this; } + /** + * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units + * are calendar-aware, meaning they respect leap additions, variable days per month, etc. + * + * This is mutually exclusive with {@link DateHistogramAggregationBuilder#fixedInterval(DateHistogramInterval)} + * + * @param interval The calendar interval to use with the aggregation + */ + public DateHistogramAggregationBuilder calendarInterval(DateHistogramInterval interval) { + dateHistogramInterval.calendarInterval(interval); + return this; + } + + /** + * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are + * not calendar aware and are simply multiples of fixed, SI units. + * + * This is mutually exclusive with {@link DateHistogramAggregationBuilder#calendarInterval(DateHistogramInterval)} + * + * @param interval The fixed interval to use with the aggregation + */ + public DateHistogramAggregationBuilder fixedInterval(DateHistogramInterval interval) { + dateHistogramInterval.fixedInterval(interval); + return this; + } + + /** + * Returns the interval as a date time unit if and only if it was configured as a calendar interval originally. + * Returns null otherwise. + */ + public DateHistogramInterval getCalendarInterval() { + if (dateHistogramInterval.getIntervalType().equals(DateIntervalWrapper.IntervalTypeEnum.CALENDAR)) { + return dateHistogramInterval.getAsCalendarInterval(); + } + return null; + } + + /** + * Returns the interval as a fixed time unit if and only if it was configured as a fixed interval originally. + * Returns null otherwise. + */ + public DateHistogramInterval getFixedInterval() { + if (dateHistogramInterval.getIntervalType().equals(DateIntervalWrapper.IntervalTypeEnum.FIXED)) { + return dateHistogramInterval.getAsFixedInterval(); + } + return null; + } + /** Get the offset to use when rounding, which is a number of milliseconds. */ public long offset() { return offset; @@ -338,11 +376,7 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil @Override protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - if (dateHistogramInterval == null) { - builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), interval); - } else { - builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString()); - } + dateHistogramInterval.toXContent(builder, params); builder.field(Histogram.OFFSET_FIELD.getPreferredName(), offset); if (order != null) { @@ -412,13 +446,26 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil // We need all not only values but also rounded values to be within // [prevTransition, nextTransition]. final long low; - Rounding.DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); - if (intervalAsUnit != null) { - Rounding rounding = Rounding.builder(intervalAsUnit).timeZone(timeZone()).build(); + + + DateIntervalWrapper.IntervalTypeEnum intervalType = dateHistogramInterval.getIntervalType(); + if (intervalType.equals(DateIntervalWrapper.IntervalTypeEnum.FIXED)) { + low = Math.addExact(prevTransition, dateHistogramInterval.tryIntervalAsFixedUnit().millis()); + } else if (intervalType.equals(DateIntervalWrapper.IntervalTypeEnum.CALENDAR)) { + final Rounding.DateTimeUnit intervalAsUnit = dateHistogramInterval.tryIntervalAsCalendarUnit(); + final Rounding rounding = Rounding.builder(intervalAsUnit).timeZone(timeZone()).build(); low = rounding.nextRoundingValue(prevTransition); } else { - final TimeValue intervalAsMillis = getIntervalAsTimeValue(); - low = Math.addExact(prevTransition, intervalAsMillis.millis()); + // We're not sure what the interval was originally (legacy) so use old behavior of assuming + // calendar first, then fixed. Required because fixed/cal overlap in places ("1h") + Rounding.DateTimeUnit intervalAsUnit = dateHistogramInterval.tryIntervalAsCalendarUnit(); + if (intervalAsUnit != null) { + final Rounding rounding = Rounding.builder(intervalAsUnit).timeZone(timeZone()).build(); + low = rounding.nextRoundingValue(prevTransition); + } else { + final TimeValue intervalAsMillis = dateHistogramInterval.tryIntervalAsFixedUnit(); + low = Math.addExact(prevTransition, intervalAsMillis.millis()); + } } // rounding rounds down, so 'nextTransition' is a good upper bound final long high = nextTransition; @@ -440,13 +487,13 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { final ZoneId tz = timeZone(); - final Rounding rounding = createRounding(tz); + final Rounding rounding = dateHistogramInterval.createRounding(tz); final ZoneId rewrittenTimeZone = rewriteTimeZone(context.getQueryShardContext()); final Rounding shardRounding; if (tz == rewrittenTimeZone) { shardRounding = rounding; } else { - shardRounding = createRounding(rewrittenTimeZone); + shardRounding = dateHistogramInterval.createRounding(rewrittenTimeZone); } ExtendedBounds roundedBounds = null; @@ -458,47 +505,9 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil rounding, shardRounding, roundedBounds, context, parent, subFactoriesBuilder, metaData); } - /** Return the interval as a date time unit if applicable. If this returns - * {@code null} then it means that the interval is expressed as a fixed - * {@link TimeValue} and may be accessed via - * {@link #getIntervalAsTimeValue()}. */ - private Rounding.DateTimeUnit getIntervalAsDateTimeUnit() { - if (dateHistogramInterval != null) { - return DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); - } - return null; - } - - /** - * Get the interval as a {@link TimeValue}. Should only be called if - * {@link #getIntervalAsDateTimeUnit()} returned {@code null}. - */ - private TimeValue getIntervalAsTimeValue() { - if (dateHistogramInterval != null) { - return TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval"); - } else { - return TimeValue.timeValueMillis(interval); - } - } - - private Rounding createRounding(ZoneId timeZone) { - Rounding.Builder tzRoundingBuilder; - Rounding.DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); - if (intervalAsUnit != null) { - tzRoundingBuilder = Rounding.builder(intervalAsUnit); - } else { - tzRoundingBuilder = Rounding.builder(getIntervalAsTimeValue()); - } - if (timeZone != null) { - tzRoundingBuilder.timeZone(timeZone); - } - Rounding rounding = tzRoundingBuilder.build(); - return rounding; - } - @Override protected int innerHashCode() { - return Objects.hash(order, keyed, minDocCount, interval, dateHistogramInterval, minDocCount, extendedBounds); + return Objects.hash(order, keyed, minDocCount, dateHistogramInterval, minDocCount, extendedBounds); } @Override @@ -507,7 +516,6 @@ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuil return Objects.equals(order, other.order) && Objects.equals(keyed, other.keyed) && Objects.equals(minDocCount, other.minDocCount) - && Objects.equals(interval, other.interval) && Objects.equals(dateHistogramInterval, other.dateHistogramInterval) && Objects.equals(offset, other.offset) && Objects.equals(extendedBounds, other.extendedBounds); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java index c01a1190ff3..08a4a3bf76f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramInterval.java @@ -19,9 +19,12 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.common.Rounding; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -107,4 +110,21 @@ public class DateHistogramInterval implements Writeable, ToXContentFragment { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return builder.value(toString()); } + + /** + * Converts this DateHistogramInterval into a millisecond representation. If this is a calendar + * interval, it is an approximation of milliseconds based on the fixed equivalent (e.g. `1h` is treated as 60 + * fixed minutes, rather than the hour at a specific point in time. + * + * This is merely a convenience helper for quick comparisons and should not be used for situations that + * require precise durations. + */ + public long estimateMillis() { + if (Strings.isNullOrEmpty(expression) == false && DateHistogramAggregationBuilder.DATE_FIELD_UNITS.containsKey(expression)) { + Rounding.DateTimeUnit intervalUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(expression); + return intervalUnit.getField().getBaseUnit().getDuration().getSeconds() * 1000; + } else { + return TimeValue.parseTimeValue(expression, "DateHistogramInterval#estimateMillis").getMillis(); + } + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java new file mode 100644 index 00000000000..a53369e2a37 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalConsumer.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +/** + * A shared interface for aggregations that parse and use "interval" parameters. + * + * Provides definitions for the new fixed and calendar intervals, and deprecated + * defintions for the old interval/dateHisto interval parameters + */ +public interface DateIntervalConsumer { + @Deprecated + T interval(long interval); + @Deprecated + T dateHistogramInterval(DateHistogramInterval dateHistogramInterval); + T calendarInterval(DateHistogramInterval interval); + T fixedInterval(DateHistogramInterval interval); + + @Deprecated + long interval(); + @Deprecated + DateHistogramInterval dateHistogramInterval(); +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java new file mode 100644 index 00000000000..229fa0d15bb --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -0,0 +1,427 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.logging.log4j.LogManager; +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; +import org.elasticsearch.common.Rounding.DateTimeUnit; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.time.ZoneId; +import java.util.Locale; +import java.util.Objects; + +/** + * A class that handles all the parsing, bwc and deprecations surrounding date histogram intervals. + * + * - Provides parser helpers for the deprecated interval/dateHistogramInterval parameters. + * - Provides parser helpers for the new calendar/fixed interval parameters + * - Can read old intervals from a stream and convert to new intervals + * - Can write new intervals to old format when streaming out + * - Provides a variety of helper methods to interpret the intervals as different types, depending on caller's need + * + * After the deprecated parameters are removed, this class can be simplified greatly. The legacy options + * will be removed, and the mutual-exclusion checks can be done in the setters directly removing the need + * for the enum and the complicated "state machine" logic + */ +public class DateIntervalWrapper implements ToXContentFragment, Writeable { + private static final DeprecationLogger DEPRECATION_LOGGER + = new DeprecationLogger(LogManager.getLogger(DateHistogramAggregationBuilder.class)); + private static final String DEPRECATION_TEXT = "[interval] on [date_histogram] is deprecated, use [fixed_interval] or " + + "[calendar_interval] in the future."; + + private static final ParseField FIXED_INTERVAL_FIELD = new ParseField("fixed_interval"); + private static final ParseField CALENDAR_INTERVAL_FIELD = new ParseField("calendar_interval"); + + public enum IntervalTypeEnum implements Writeable { + NONE, FIXED, CALENDAR, LEGACY_INTERVAL, LEGACY_DATE_HISTO; + + public static IntervalTypeEnum fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public static IntervalTypeEnum fromStream(StreamInput in) throws IOException { + return in.readEnum(IntervalTypeEnum.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeEnum(this); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } + } + + private DateHistogramInterval dateHistogramInterval; + private IntervalTypeEnum intervalType = IntervalTypeEnum.NONE; + + public static void declareIntervalFields(ObjectParser parser) { + + // NOTE: this field is deprecated and will be removed + parser.declareField((wrapper, interval) -> { + if (interval instanceof Long) { + wrapper.interval((long) interval); + } else { + wrapper.dateHistogramInterval((DateHistogramInterval) interval); + } + }, p -> { + if (p.currentToken() == XContentParser.Token.VALUE_NUMBER) { + return p.longValue(); + } else { + return new DateHistogramInterval(p.text()); + } + }, Histogram.INTERVAL_FIELD, ObjectParser.ValueType.LONG); + + parser.declareField(DateIntervalConsumer::calendarInterval, + p -> new DateHistogramInterval(p.text()), CALENDAR_INTERVAL_FIELD, ObjectParser.ValueType.STRING); + + parser.declareField(DateIntervalConsumer::fixedInterval, + p -> new DateHistogramInterval(p.text()), FIXED_INTERVAL_FIELD, ObjectParser.ValueType.STRING); + } + + public DateIntervalWrapper() {} + + public DateIntervalWrapper(StreamInput in) throws IOException { + if (in.getVersion().before(Version.V_7_2_0)) { + long interval = in.readLong(); + DateHistogramInterval histoInterval = in.readOptionalWriteable(DateHistogramInterval::new); + + if (histoInterval != null) { + dateHistogramInterval = histoInterval; + intervalType = IntervalTypeEnum.LEGACY_DATE_HISTO; + } else { + dateHistogramInterval = new DateHistogramInterval(interval + "ms"); + intervalType = IntervalTypeEnum.LEGACY_INTERVAL; + } + } else { + dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); + intervalType = IntervalTypeEnum.fromStream(in); + } + } + + public IntervalTypeEnum getIntervalType() { + return intervalType; + } + + /** Get the current interval in milliseconds that is set on this builder. */ + @Deprecated + public long interval() { + DEPRECATION_LOGGER.deprecated(DEPRECATION_TEXT); + if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) { + return TimeValue.parseTimeValue(dateHistogramInterval.toString(), "interval").getMillis(); + } + return 0; + } + + /** Set the interval on this builder, and return the builder so that calls can be chained. + * If both {@link #interval()} and {@link #dateHistogramInterval()} are set, then the + * {@link #dateHistogramInterval()} wins. + * + * @deprecated use {@link DateHistogramAggregationBuilder#fixedInterval(DateHistogramInterval)} + * or {@link DateHistogramAggregationBuilder#calendarInterval(DateHistogramInterval)} instead + * @since 7.2.0 + */ + @Deprecated + public void interval(long interval) { + if (interval < 1) { + throw new IllegalArgumentException("[interval] must be 1 or greater for aggregation [date_histogram]"); + } + setIntervalType(IntervalTypeEnum.LEGACY_INTERVAL); + DEPRECATION_LOGGER.deprecated(DEPRECATION_TEXT); + this.dateHistogramInterval = new DateHistogramInterval(interval + "ms"); + } + + /** Get the current date interval that is set on this builder. */ + @Deprecated + public DateHistogramInterval dateHistogramInterval() { + DEPRECATION_LOGGER.deprecated(DEPRECATION_TEXT); + if (intervalType.equals(IntervalTypeEnum.LEGACY_DATE_HISTO)) { + return dateHistogramInterval; + } + return null; + } + + /** Set the interval on this builder, and return the builder so that calls can be chained. + * If both {@link #interval()} and {@link #dateHistogramInterval()} are set, then the + * {@link #dateHistogramInterval()} wins. + * + * @deprecated use {@link DateIntervalWrapper#fixedInterval(DateHistogramInterval)} + * or {@link DateIntervalWrapper#calendarInterval(DateHistogramInterval)} instead + * @since 7.2.0 + */ + @Deprecated + public void dateHistogramInterval(DateHistogramInterval dateHistogramInterval) { + if (dateHistogramInterval == null || Strings.isNullOrEmpty(dateHistogramInterval.toString())) { + throw new IllegalArgumentException("[dateHistogramInterval] must not be null: [date_histogram]"); + } + setIntervalType(IntervalTypeEnum.LEGACY_DATE_HISTO); + DEPRECATION_LOGGER.deprecated(DEPRECATION_TEXT); + this.dateHistogramInterval = dateHistogramInterval; + } + + /** + * Returns the interval as a calendar interval. Throws an exception if the value cannot be converted + * into a calendar interval + */ + public DateHistogramInterval getAsCalendarInterval() { + if (intervalType.equals(IntervalTypeEnum.CALENDAR) || tryIntervalAsCalendarUnit() != null) { + return dateHistogramInterval; + } + throw new IllegalStateException("Cannot convert [" + intervalType.toString() + "] interval type into calendar interval"); + } + + /** + * Sets the interval of the DateHistogram using calendar units (`1d`, `1w`, `1M`, etc). These units + * are calendar-aware, meaning they respect leap additions, variable days per month, etc. + * + * This is mutually exclusive with {@link DateIntervalWrapper#fixedInterval(DateHistogramInterval)} + * + * @param interval The fixed interval to use + */ + public void calendarInterval(DateHistogramInterval interval) { + if (interval == null || Strings.isNullOrEmpty(interval.toString())) { + throw new IllegalArgumentException("[interval] must not be null: [date_histogram]"); + } + if (DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()) == null) { + throw new IllegalArgumentException("The supplied interval [" + interval +"] could not be parsed " + + "as a calendar interval."); + } + setIntervalType(IntervalTypeEnum.CALENDAR); + this.dateHistogramInterval = interval; + } + + /** + * Returns the interval as a Fixed interval. Throws an exception if the value cannot be converted + * into a fixed interval + */ + public DateHistogramInterval getAsFixedInterval() { + if (intervalType.equals(IntervalTypeEnum.FIXED) || tryIntervalAsFixedUnit() != null) { + return dateHistogramInterval; + } + throw new IllegalStateException("Cannot convert [" + intervalType.toString() + "] interval type into fixed interval"); + } + + /** + * Sets the interval of the DateHistogram using fixed units (`1ms`, `1s`, `10m`, `4h`, etc). These are + * not calendar aware and are simply multiples of fixed, SI units. + * + * This is mutually exclusive with {@link DateIntervalWrapper#calendarInterval(DateHistogramInterval)} + * + * @param interval The fixed interval to use + */ + public void fixedInterval(DateHistogramInterval interval) { + if (interval == null || Strings.isNullOrEmpty(interval.toString())) { + throw new IllegalArgumentException("[interval] must not be null: [date_histogram]"); + } + setIntervalType(IntervalTypeEnum.FIXED); + // Parse to make sure it is a valid fixed too + TimeValue.parseTimeValue(interval.toString(), DateHistogramAggregationBuilder.NAME + ".fixedInterval"); + this.dateHistogramInterval = interval; + } + + /** Return the interval as a date time unit if applicable, regardless of how it was configured. If this returns + * {@code null} then it means that the interval is expressed as a fixed + * {@link TimeValue} and may be accessed via {@link #tryIntervalAsFixedUnit()}. */ + DateTimeUnit tryIntervalAsCalendarUnit() { + if (intervalType.equals(IntervalTypeEnum.CALENDAR) || intervalType.equals(IntervalTypeEnum.LEGACY_DATE_HISTO)) { + return DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); + } + return null; + } + + /** + * Get the interval as a {@link TimeValue}, regardless of how it was configured. Returns null if + * the interval cannot be parsed as a fixed time. + */ + TimeValue tryIntervalAsFixedUnit() { + if (dateHistogramInterval == null || Strings.isNullOrEmpty(dateHistogramInterval.toString())) { + return null; + } + try { + return TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval"); + } catch (IllegalArgumentException e) { + return null; + } + } + + public Rounding createRounding(ZoneId timeZone) { + Rounding.Builder tzRoundingBuilder; + if (isEmpty()) { + throw new IllegalArgumentException("Invalid interval specified, must be non-null and non-empty"); + } + DateIntervalWrapper.IntervalTypeEnum intervalType = getIntervalType(); + if (intervalType.equals(DateIntervalWrapper.IntervalTypeEnum.FIXED)) { + tzRoundingBuilder = Rounding.builder(tryIntervalAsFixedUnit()); + } else if (intervalType.equals(DateIntervalWrapper.IntervalTypeEnum.CALENDAR)) { + tzRoundingBuilder = Rounding.builder(tryIntervalAsCalendarUnit()); + } else { + // We're not sure what the interval was originally (legacy) so use old behavior of assuming + // calendar first, then fixed. Required because fixed/cal overlap in places ("1h") + DateTimeUnit calInterval = tryIntervalAsCalendarUnit(); + TimeValue fixedInterval = tryIntervalAsFixedUnit(); + if (calInterval != null) { + tzRoundingBuilder = Rounding.builder(calInterval); + } else if (fixedInterval != null) { + tzRoundingBuilder = Rounding.builder(fixedInterval); + } else { + // If we get here we have exhausted our options and are not able to parse this interval + throw new IllegalArgumentException("Unable to parse interval [" + dateHistogramInterval + "]"); + } + } + if (timeZone != null) { + tzRoundingBuilder.timeZone(timeZone); + } + return tzRoundingBuilder.build(); + } + + private void setIntervalType(IntervalTypeEnum type) { + // If we're the same or have no existing type, just use the provided type + if (intervalType.equals(IntervalTypeEnum.NONE) || type.equals(intervalType)) { + intervalType = type; + return; + } + + // interval() method + switch (type) { + case LEGACY_INTERVAL: + if (intervalType.equals(IntervalTypeEnum.CALENDAR) || intervalType.equals(IntervalTypeEnum.FIXED)) { + throw new IllegalArgumentException("Cannot use [interval] with [fixed_interval] or [calendar_interval] " + + "configuration options."); + } + + // dateHistogramInterval() takes precedence over interval() + if (intervalType.equals(IntervalTypeEnum.LEGACY_DATE_HISTO) == false) { + intervalType = IntervalTypeEnum.LEGACY_INTERVAL; + } + break; + + case LEGACY_DATE_HISTO: + if (intervalType.equals(IntervalTypeEnum.CALENDAR) || intervalType.equals(IntervalTypeEnum.FIXED)) { + throw new IllegalArgumentException("Cannot use [interval] with [fixed_interval] or [calendar_interval] " + + "configuration options."); + } + + // dateHistogramInterval() takes precedence over interval() + intervalType = IntervalTypeEnum.LEGACY_DATE_HISTO; + break; + + case FIXED: + if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL) || intervalType.equals(IntervalTypeEnum.LEGACY_DATE_HISTO)) { + throw new IllegalArgumentException("Cannot use [fixed_interval] with [interval] " + + "configuration option."); + } + if (intervalType.equals(IntervalTypeEnum.CALENDAR)) { + throw new IllegalArgumentException("Cannot use [fixed_interval] with [calendar_interval] " + + "configuration option."); + } + intervalType = IntervalTypeEnum.FIXED; + break; + + case CALENDAR: + if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL) || intervalType.equals(IntervalTypeEnum.LEGACY_DATE_HISTO)) { + throw new IllegalArgumentException("Cannot use [calendar_interval] with [interval] " + + "configuration option."); + } + if (intervalType.equals(IntervalTypeEnum.FIXED)) { + throw new IllegalArgumentException("Cannot use [calendar_interval] with [fixed_interval] " + + "configuration option."); + } + intervalType = IntervalTypeEnum.CALENDAR; + break; + + default: + throw new IllegalStateException("Unknown interval type."); + } + } + + public boolean isEmpty() { + if (intervalType.equals(IntervalTypeEnum.NONE)) { + return true; + } + return dateHistogramInterval == null || Strings.isNullOrEmpty(dateHistogramInterval.toString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().before(Version.V_7_2_0)) { + if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) { + out.writeLong(TimeValue.parseTimeValue(dateHistogramInterval.toString(), + DateHistogramAggregationBuilder.NAME + ".innerWriteTo").getMillis()); + } else { + out.writeLong(0L); + } + out.writeOptionalWriteable(dateHistogramInterval); + } else { + out.writeOptionalWriteable(dateHistogramInterval); + intervalType.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (intervalType.equals(IntervalTypeEnum.LEGACY_DATE_HISTO) || intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) { + builder.field(Histogram.INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString()); + } else if (intervalType.equals(IntervalTypeEnum.FIXED)){ + builder.field(FIXED_INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString()); + } else if (intervalType.equals(IntervalTypeEnum.CALENDAR)) { + builder.field(CALENDAR_INTERVAL_FIELD.getPreferredName(), dateHistogramInterval.toString()); + } + return builder; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + + final DateIntervalWrapper that = (DateIntervalWrapper) other; + if (tryIntervalAsCalendarUnit() != null && that.tryIntervalAsCalendarUnit() == null) { + return false; + } + if (tryIntervalAsCalendarUnit() == null && that.tryIntervalAsCalendarUnit() != null) { + return false; + } + return Objects.equals(this.dateHistogramInterval, that.dateHistogramInterval); + } + + @Override + public int hashCode() { + boolean isCalendar = tryIntervalAsCalendarUnit() != null; + return Objects.hash(dateHistogramInterval, isCalendar); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java index 7a4e0fb7059..1fd8580e290 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java @@ -148,7 +148,7 @@ public class AggregatorFactoriesTests extends ESTestCase { .startObject("by_date") .startObject("date_histogram") .field("field", "timestamp") - .field("interval", "month") + .field("calendar_interval", "month") .endObject() .startObject("aggs") // the aggregation name is missing @@ -172,7 +172,7 @@ public class AggregatorFactoriesTests extends ESTestCase { .startObject("by_date") .startObject("date_histogram") .field("field", "timestamp") - .field("interval", "month") + .field("calendar_interval", "month") .endObject() .startObject("aggs") .startObject("tag_count") diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java index 40ac3e49f3a..1b0fbf5bbcd 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MissingValueIT.java @@ -158,7 +158,7 @@ public class MissingValueIT extends ESIntegTestCase { public void testDateHistogram() { SearchResponse response = client().prepareSearch("idx") .addAggregation( - dateHistogram("my_histogram").field("date").dateHistogramInterval(DateHistogramInterval.YEAR).missing("2014-05-07")) + dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2014-05-07")) .get(); assertSearchResponse(response); Histogram histogram = response.getAggregations().get("my_histogram"); @@ -170,7 +170,7 @@ public class MissingValueIT extends ESIntegTestCase { response = client().prepareSearch("idx") .addAggregation( - dateHistogram("my_histogram").field("date").dateHistogramInterval(DateHistogramInterval.YEAR).missing("2015-05-07")) + dateHistogram("my_histogram").field("date").calendarInterval(DateHistogramInterval.YEAR).missing("2015-05-07")) .get(); assertSearchResponse(response); histogram = response.getAggregations().get("my_histogram"); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index eafd88328b7..ad2939347ed 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -1359,7 +1359,7 @@ public class DateHistogramIT extends ESIntegTestCase { .addAggregation(dateHistogram("histo").field("date").interval(-TimeUnit.DAYS.toMillis(1)).minDocCount(0)).get(); fail(); } catch (IllegalArgumentException e) { - assertThat(e.toString(), containsString("[interval] must be 1 or greater for histogram aggregation [histo]")); + assertThat(e.toString(), containsString("[interval] must be 1 or greater for aggregation [date_histogram]")); } } @@ -1433,7 +1433,7 @@ public class DateHistogramIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("idx") .setQuery(new MatchNoneQueryBuilder()) .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("Europe/Oslo")) - .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( + .calendarInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( new ExtendedBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00"))) .get(); @@ -1446,6 +1446,23 @@ public class DateHistogramIT extends ESIntegTestCase { ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); assertThat(((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + + response = client().prepareSearch("idx") + .setQuery(new MatchNoneQueryBuilder()) + .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("Europe/Oslo")) + .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( + new ExtendedBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00"))) + .get(); + + histo = response.getAggregations().get("histo"); + buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + assertThat(((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(0).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + assertThat(((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + assertThat(((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); } /** diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java index d31f7a89b46..08b8cb13a33 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java @@ -36,10 +36,11 @@ public class CompositeAggregationBuilderTests extends BaseAggregationTestCase {} )); assertThat(exc.getMessage(), containsString("failed to parse date field [1474329600000]")); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } public void testWithDateHistogramAndTimeZone() throws IOException { @@ -1209,6 +1214,8 @@ public class CompositeAggregatorTests extends AggregatorTestCase { assertEquals(2L, result.getBuckets().get(1).getDocCount()); } ); + + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } public void testWithDateHistogramAndKeyword() throws IOException { @@ -1286,6 +1293,8 @@ public class CompositeAggregatorTests extends AggregatorTestCase { assertEquals(1L, result.getBuckets().get(2).getDocCount()); } ); + + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } public void testWithKeywordAndHistogram() throws IOException { @@ -1482,6 +1491,8 @@ public class CompositeAggregatorTests extends AggregatorTestCase { assertEquals(1L, result.getBuckets().get(3).getDocCount()); } ); + + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } public void testWithKeywordAndTopHits() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java new file mode 100644 index 00000000000..aab225ddf8e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/LegacyIntervalCompositeAggBuilderTests.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.composite; + +import org.elasticsearch.script.Script; +import org.elasticsearch.search.aggregations.BaseAggregationTestCase; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.sort.SortOrder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Duplicates the tests from {@link CompositeAggregationBuilderTests}, except using the deprecated + * interval on date histo. Separated to make testing the warnings easier. + * + * Can be removed in when the legacy interval options are gone + */ +public class LegacyIntervalCompositeAggBuilderTests extends BaseAggregationTestCase { + + private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() { + DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); + if (randomBoolean()) { + histo.field(randomAlphaOfLengthBetween(1, 20)); + } else { + histo.script(new Script(randomAlphaOfLengthBetween(10, 20))); + } + if (randomBoolean()) { + histo.dateHistogramInterval(randomFrom(DateHistogramInterval.days(1), + DateHistogramInterval.minutes(1), DateHistogramInterval.weeks(1))); + } else { + histo.interval(randomNonNegativeLong()); + } + if (randomBoolean()) { + histo.timeZone(randomZone()); + } + if (randomBoolean()) { + histo.missingBucket(true); + } + return histo; + } + + private TermsValuesSourceBuilder randomTermsSourceBuilder() { + TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); + if (randomBoolean()) { + terms.field(randomAlphaOfLengthBetween(1, 20)); + } else { + terms.script(new Script(randomAlphaOfLengthBetween(10, 20))); + } + terms.order(randomFrom(SortOrder.values())); + if (randomBoolean()) { + terms.missingBucket(true); + } + return terms; + } + + private HistogramValuesSourceBuilder randomHistogramSourceBuilder() { + HistogramValuesSourceBuilder histo = new HistogramValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10)); + if (randomBoolean()) { + histo.field(randomAlphaOfLengthBetween(1, 20)); + } else { + histo.script(new Script(randomAlphaOfLengthBetween(10, 20))); + } + if (randomBoolean()) { + histo.missingBucket(true); + } + histo.interval(randomDoubleBetween(Math.nextUp(0), Double.MAX_VALUE, false)); + return histo; + } + + @Override + protected CompositeAggregationBuilder createTestAggregatorBuilder() { + int numSources = randomIntBetween(1, 10); + List> sources = new ArrayList<>(); + // ensure we add at least one date histo + sources.add(randomDateHistogramSourceBuilder()); + for (int i = 0; i < numSources; i++) { + int type = randomIntBetween(0, 2); + switch (type) { + case 0: + sources.add(randomTermsSourceBuilder()); + break; + case 1: + sources.add(randomDateHistogramSourceBuilder()); + break; + case 2: + sources.add(randomHistogramSourceBuilder()); + break; + default: + throw new AssertionError("wrong branch"); + } + } + return new CompositeAggregationBuilder(randomAlphaOfLength(10), sources); + } + + @Override + public void testFromXContent() throws IOException { + super.testFromXContent(); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + @Override + public void testFromXContentMulti() throws IOException { + super.testFromXContentMulti(); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + @Override + public void testSerializationMulti() throws IOException { + super.testSerializationMulti(); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + @Override + public void testToString() throws IOException { + super.testToString(); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + @Override + public void testSerialization() throws IOException { + super.testSerialization(); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + @Override + public void testEqualsAndHashcode() throws IOException { + super.testEqualsAndHashcode(); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + @Override + public void testShallowCopy() { + super.testShallowCopy(); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 3ce74b04e23..17581b94584 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -34,15 +34,19 @@ import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.MultiBucketConsumerService.TooManyBucketsException; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.function.Consumer; +import static org.hamcrest.Matchers.equalTo; + public class DateHistogramAggregatorTests extends AggregatorTestCase { private static final String DATE_FIELD = "date"; @@ -60,7 +64,7 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { "2016-03-04T17:09:50", "2017-12-12T22:55:46"); - public void testMatchNoDocs() throws IOException { + public void testMatchNoDocsDeprecatedInterval() throws IOException { testBothCases(new MatchNoDocsQuery(), dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), histogram -> { @@ -68,9 +72,21 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertFalse(AggregationInspectionHelper.hasValue(histogram)); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testMatchAllDocs() throws IOException { + public void testMatchNoDocs() throws IOException { + testBothCases(new MatchNoDocsQuery(), dataset, + aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + testBothCases(new MatchNoDocsQuery(), dataset, + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD), + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + } + + public void testMatchAllDocsDeprecatedInterval() throws IOException { Query query = new MatchAllDocsQuery(); testSearchCase(query, dataset, @@ -94,9 +110,49 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testNoDocs() throws IOException { + public void testMatchAllDocs() throws IOException { + Query query = new MatchAllDocsQuery(); + + List foo = new ArrayList<>(); + for (int i = 0; i < 1000; i++) { + foo.add(dataset.get(randomIntBetween(0, dataset.size()-1))); + } + testSearchAndReduceCase(query, foo, + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD).order(BucketOrder.count(false)), + histogram -> assertEquals(8, histogram.getBuckets().size()) + ); + + testSearchCase(query, dataset, + aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), + histogram -> assertEquals(6, histogram.getBuckets().size()) + ); + testSearchAndReduceCase(query, dataset, + aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), + histogram -> assertEquals(8, histogram.getBuckets().size()) + ); + testBothCases(query, dataset, + aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD).minDocCount(1L), + histogram -> assertEquals(6, histogram.getBuckets().size()) + ); + + testSearchCase(query, dataset, + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD), + histogram -> assertEquals(6, histogram.getBuckets().size()) + ); + testSearchAndReduceCase(query, dataset, + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD), + histogram -> assertEquals(8, histogram.getBuckets().size()) + ); + testBothCases(query, dataset, + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD).minDocCount(1L), + histogram -> assertEquals(6, histogram.getBuckets().size()) + ); + } + + public void testNoDocsDeprecatedInterval() throws IOException { Query query = new MatchNoDocsQuery(); List dates = Collections.emptyList(); Consumer aggregation = agg -> @@ -111,9 +167,32 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { testSearchAndReduceCase(query, dates, aggregation, histogram -> assertNull(histogram) ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testAggregateWrongField() throws IOException { + public void testNoDocs() throws IOException { + Query query = new MatchNoDocsQuery(); + List dates = Collections.emptyList(); + Consumer aggregation = agg -> + agg.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD); + testSearchCase(query, dates, aggregation, + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + testSearchAndReduceCase(query, dates, aggregation, + histogram -> assertNull(histogram) + ); + + aggregation = agg -> + agg.fixedInterval(new DateHistogramInterval("365d")).field(DATE_FIELD); + testSearchCase(query, dates, aggregation, + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + testSearchAndReduceCase(query, dates, aggregation, + histogram -> assertNull(histogram) + ); + } + + public void testAggregateWrongFieldDeprecated() throws IOException { testBothCases(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field("wrong_field"), histogram -> { @@ -121,9 +200,21 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertFalse(AggregationInspectionHelper.hasValue(histogram)); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testIntervalYear() throws IOException { + public void testAggregateWrongField() throws IOException { + testBothCases(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field("wrong_field"), + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + testBothCases(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("365d")).field("wrong_field"), + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + } + + public void testIntervalYearDeprecated() throws IOException { testBothCases(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), histogram -> { @@ -143,9 +234,32 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertEquals(1, bucket.getDocCount()); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testIntervalMonth() throws IOException { + public void testIntervalYear() throws IOException { + testBothCases(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset, + aggregation -> aggregation.calendarInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + } + + public void testIntervalMonthDeprecated() throws IOException { testBothCases(new MatchAllDocsQuery(), Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"), aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.MONTH).field(DATE_FIELD), @@ -166,9 +280,33 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertEquals(3, bucket.getDocCount()); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testIntervalDay() throws IOException { + public void testIntervalMonth() throws IOException { + testBothCases(new MatchAllDocsQuery(), + Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.MONTH).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-03-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + } + + public void testIntervalDayDeprecated() throws IOException { testBothCases(new MatchAllDocsQuery(), Arrays.asList( "2017-02-01", @@ -201,9 +339,77 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertEquals(1, bucket.getDocCount()); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testIntervalHour() throws IOException { + public void testIntervalDay() throws IOException { + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(4, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("24h")).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(4, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + } + + public void testIntervalHourDeprecated() throws IOException { testBothCases(new MatchAllDocsQuery(), Arrays.asList( "2017-02-01T09:02:00.000Z", @@ -247,9 +453,99 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertEquals(3, bucket.getDocCount()); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testIntervalMinute() throws IOException { + public void testIntervalHour() throws IOException { + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:00.000Z", + "2017-02-01T09:35:00.000Z", + "2017-02-01T10:15:00.000Z", + "2017-02-01T13:06:00.000Z", + "2017-02-01T14:04:00.000Z", + "2017-02-01T14:05:00.000Z", + "2017-02-01T15:59:00.000Z", + "2017-02-01T16:06:00.000Z", + "2017-02-01T16:48:00.000Z", + "2017-02-01T16:59:00.000Z" + ), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.HOUR).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(6, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T10:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T13:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T14:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T15:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:00.000Z", + "2017-02-01T09:35:00.000Z", + "2017-02-01T10:15:00.000Z", + "2017-02-01T13:06:00.000Z", + "2017-02-01T14:04:00.000Z", + "2017-02-01T14:05:00.000Z", + "2017-02-01T15:59:00.000Z", + "2017-02-01T16:06:00.000Z", + "2017-02-01T16:48:00.000Z", + "2017-02-01T16:59:00.000Z" + ), + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("60m")).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(6, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T10:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T13:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T14:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T15:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + } + + public void testIntervalMinuteDeprecated() throws IOException { testBothCases(new MatchAllDocsQuery(), Arrays.asList( "2017-02-01T09:02:35.000Z", @@ -276,9 +572,65 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertEquals(2, bucket.getDocCount()); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testIntervalSecond() throws IOException { + public void testIntervalMinute() throws IOException { + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:35.000Z", + "2017-02-01T09:02:59.000Z", + "2017-02-01T09:15:37.000Z", + "2017-02-01T09:16:04.000Z", + "2017-02-01T09:16:42.000Z" + ), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.MINUTE).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T09:15:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + } + ); + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:35.000Z", + "2017-02-01T09:02:59.000Z", + "2017-02-01T09:15:37.000Z", + "2017-02-01T09:16:04.000Z", + "2017-02-01T09:16:42.000Z" + ), + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("60s")).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T09:15:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + } + ); + } + + public void testIntervalSecondDeprecated() throws IOException { testBothCases(new MatchAllDocsQuery(), Arrays.asList( "2017-02-01T00:00:05.015Z", @@ -306,9 +658,67 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertEquals(3, bucket.getDocCount()); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } - public void testMinDocCount() throws IOException { + public void testIntervalSecond() throws IOException { + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T00:00:05.015Z", + "2017-02-01T00:00:11.299Z", + "2017-02-01T00:00:11.074Z", + "2017-02-01T00:00:37.688Z", + "2017-02-01T00:00:37.210Z", + "2017-02-01T00:00:37.380Z" + ), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.SECOND).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + testBothCases(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T00:00:05.015Z", + "2017-02-01T00:00:11.299Z", + "2017-02-01T00:00:11.074Z", + "2017-02-01T00:00:37.688Z", + "2017-02-01T00:00:37.210Z", + "2017-02-01T00:00:37.380Z" + ), + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("1000ms")).field(DATE_FIELD).minDocCount(1L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T00:00:37.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + } + + public void testMinDocCountDeprecated() throws IOException { Query query = LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2017-02-01T00:00:00.000Z"), asLong("2017-02-01T00:00:30.000Z")); List timestamps = Arrays.asList( "2017-02-01T00:00:05.015Z", @@ -355,6 +765,56 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { assertEquals(3, bucket.getDocCount()); } ); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + public void testMinDocCount() throws IOException { + Query query = LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2017-02-01T00:00:00.000Z"), asLong("2017-02-01T00:00:30.000Z")); + List timestamps = Arrays.asList( + "2017-02-01T00:00:05.015Z", + "2017-02-01T00:00:11.299Z", + "2017-02-01T00:00:11.074Z", + "2017-02-01T00:00:13.688Z", + "2017-02-01T00:00:21.380Z" + ); + + // 5 sec interval with minDocCount = 0 + testSearchAndReduceCase(query, timestamps, + aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(0L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(4, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T00:00:15.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T00:00:20.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + + // 5 sec interval with minDocCount = 3 + testSearchAndReduceCase(query, timestamps, + aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(3L), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(1, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); } public void testMaxBucket() throws IOException { @@ -365,6 +825,38 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { "2017-01-01T00:00:00.000Z" ); + expectThrows(TooManyBucketsException.class, () -> testSearchCase(query, timestamps, + aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD), + histogram -> {}, 2)); + + expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, + aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD), + histogram -> {}, 2)); + + expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, + aggregation -> aggregation.fixedInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD).minDocCount(0L), + histogram -> {}, 100)); + + expectThrows(TooManyBucketsException.class, () -> testSearchAndReduceCase(query, timestamps, + aggregation -> + aggregation.fixedInterval(DateHistogramInterval.seconds(5)) + .field(DATE_FIELD) + .subAggregation( + AggregationBuilders.dateHistogram("1") + .fixedInterval(DateHistogramInterval.seconds(5)) + .field(DATE_FIELD) + ), + histogram -> {}, 5)); + } + + public void testMaxBucketDeprecated() throws IOException { + Query query = new MatchAllDocsQuery(); + List timestamps = Arrays.asList( + "2010-01-01T00:00:00.000Z", + "2011-01-01T00:00:00.000Z", + "2017-01-01T00:00:00.000Z" + ); + expectThrows(TooManyBucketsException.class, () -> testSearchCase(query, timestamps, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.seconds(5)).field(DATE_FIELD), histogram -> {}, 2)); @@ -387,6 +879,232 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { .field(DATE_FIELD) ), histogram -> {}, 5)); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + public void testFixedWithCalendar() throws IOException { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.fixedInterval(DateHistogramInterval.WEEK).field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("failed to parse setting [date_histogram.fixedInterval] with value [1w] as a time value: " + + "unit is missing or unrecognized")); + } + + public void testCalendarWithFixed() throws IOException { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.calendarInterval(new DateHistogramInterval("5d")).field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("The supplied interval [5d] could not be parsed as a calendar interval.")); + } + + public void testCalendarAndThenFixed() throws IOException { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .fixedInterval(new DateHistogramInterval("2d")) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] with [calendar_interval] configuration option.")); + } + + public void testFixedAndThenCalendar() throws IOException { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d")) + .calendarInterval(DateHistogramInterval.DAY) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [fixed_interval] configuration option.")); + } + + public void testNewThenLegacy() throws IOException { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d")) + .dateHistogramInterval(DateHistogramInterval.DAY) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options.")); + + e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .dateHistogramInterval(DateHistogramInterval.DAY) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options.")); + + e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.fixedInterval(new DateHistogramInterval("2d")) + .interval(1000) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options.")); + + e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.calendarInterval(DateHistogramInterval.DAY) + .interval(1000) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [interval] with [fixed_interval] or [calendar_interval] configuration options.")); + } + + public void testLegacyThenNew() throws IOException { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation .dateHistogramInterval(DateHistogramInterval.DAY) + .fixedInterval(new DateHistogramInterval("2d")) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] with [interval] configuration option.")); + + e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.DAY) + .calendarInterval(DateHistogramInterval.DAY) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [interval] configuration option.")); + + e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.interval(1000) + .fixedInterval(new DateHistogramInterval("2d")) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [fixed_interval] with [interval] configuration option.")); + + e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.interval(1000) + .calendarInterval(DateHistogramInterval.DAY) + .field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Cannot use [calendar_interval] with [interval] configuration option.")); + + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + } + + public void testIllegalInterval() throws IOException { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> testSearchCase(new MatchAllDocsQuery(), + Collections.emptyList(), + aggregation -> aggregation.dateHistogramInterval(new DateHistogramInterval("foobar")).field(DATE_FIELD), + histogram -> {} + )); + assertThat(e.getMessage(), equalTo("Unable to parse interval [foobar]")); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } private void testSearchCase(Query query, List dataset, diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java index 1a639552ae4..38ed1776ec2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java @@ -45,29 +45,26 @@ public class DateHistogramTests extends BaseAggregationTestCase no rewrite assertNull(builder.rewriteTimeZone(shardContextThatDoesntCross)); @@ -179,7 +176,7 @@ public class DateHistogramTests extends BaseAggregationTestCase no rewrite - builder.dateHistogramInterval(DateHistogramInterval.MONTH); + builder.calendarInterval(DateHistogramInterval.MONTH); assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross)); assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); @@ -187,13 +184,13 @@ public class DateHistogramTests extends BaseAggregationTestCase no rewrite - builder.interval(1000L * 60 * 60 * 24 * 30); // ~ 1 month + builder.fixedInterval(new DateHistogramInterval(1000L * 60 * 60 * 24 * 30 + "ms")); // ~ 1 month assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross)); assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapperTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapperTests.java new file mode 100644 index 00000000000..36cab5b603a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapperTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class DateIntervalWrapperTests extends ESTestCase { + public void testValidOrdinals() { + assertThat(DateIntervalWrapper.IntervalTypeEnum.NONE.ordinal(), equalTo(0)); + assertThat(DateIntervalWrapper.IntervalTypeEnum.FIXED.ordinal(), equalTo(1)); + assertThat(DateIntervalWrapper.IntervalTypeEnum.CALENDAR.ordinal(), equalTo(2)); + assertThat(DateIntervalWrapper.IntervalTypeEnum.LEGACY_INTERVAL.ordinal(), equalTo(3)); + assertThat(DateIntervalWrapper.IntervalTypeEnum.LEGACY_DATE_HISTO.ordinal(), equalTo(4)); + } + + public void testwriteTo() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + DateIntervalWrapper.IntervalTypeEnum.NONE.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(0)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + DateIntervalWrapper.IntervalTypeEnum.FIXED.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(1)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + DateIntervalWrapper.IntervalTypeEnum.CALENDAR.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(2)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + DateIntervalWrapper.IntervalTypeEnum.LEGACY_INTERVAL.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(3)); + } + } + + try (BytesStreamOutput out = new BytesStreamOutput()) { + DateIntervalWrapper.IntervalTypeEnum.LEGACY_DATE_HISTO.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(in.readVInt(), equalTo(4)); + } + } + + } + + public void testReadFrom() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(0); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(DateIntervalWrapper.IntervalTypeEnum.fromStream(in), + equalTo(DateIntervalWrapper.IntervalTypeEnum.NONE)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(1); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(DateIntervalWrapper.IntervalTypeEnum.fromStream(in), + equalTo(DateIntervalWrapper.IntervalTypeEnum.FIXED)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(2); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(DateIntervalWrapper.IntervalTypeEnum.fromStream(in), + equalTo(DateIntervalWrapper.IntervalTypeEnum.CALENDAR)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(3); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(DateIntervalWrapper.IntervalTypeEnum.fromStream(in), + equalTo(DateIntervalWrapper.IntervalTypeEnum.LEGACY_INTERVAL)); + } + } + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(4); + try (StreamInput in = out.bytes().streamInput()) { + assertThat(DateIntervalWrapper.IntervalTypeEnum.fromStream(in), + equalTo(DateIntervalWrapper.IntervalTypeEnum.LEGACY_DATE_HISTO)); + } + } + } + + public void testInvalidReadFrom() throws Exception { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeVInt(randomIntBetween(5, Integer.MAX_VALUE)); + try (StreamInput in = out.bytes().streamInput()) { + DateIntervalWrapper.IntervalTypeEnum.fromStream(in); + fail("Expected IOException"); + } catch(IOException e) { + assertThat(e.getMessage(), containsString("Unknown IntervalTypeEnum ordinal [")); + } + + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java index 627ca9c0af9..4f312a71a83 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java @@ -84,7 +84,7 @@ public class AvgBucketAggregatorTests extends AggregatorTestCase { AvgAggregationBuilder avgBuilder = new AvgAggregationBuilder("foo").field(VALUE_FIELD); DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("histo") - .dateHistogramInterval(DateHistogramInterval.YEAR) + .calendarInterval(DateHistogramInterval.YEAR) .field(DATE_FIELD) .subAggregation(new AvgAggregationBuilder("foo").field(VALUE_FIELD)); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java index e3475be5773..9d27663d275 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -84,7 +84,7 @@ public class CumulativeSumAggregatorTests extends AggregatorTestCase { Query query = new MatchAllDocsQuery(); DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); - aggBuilder.dateHistogramInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.calendarInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); aggBuilder.subAggregation(new AvgAggregationBuilder("the_avg").field(VALUE_FIELD)); aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "the_avg")); @@ -107,7 +107,7 @@ public class CumulativeSumAggregatorTests extends AggregatorTestCase { Query query = new MatchAllDocsQuery(); DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); - aggBuilder.dateHistogramInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.calendarInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); aggBuilder.subAggregation(new AvgAggregationBuilder("the_avg").field(VALUE_FIELD)); aggBuilder.subAggregation(new DerivativePipelineAggregationBuilder("the_deriv", "the_avg")); aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "the_deriv")); @@ -148,6 +148,7 @@ public class CumulativeSumAggregatorTests extends AggregatorTestCase { sum += 1.0; } }); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } public void testDocCount() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java index 1368db5ab71..27490fa202b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java @@ -83,7 +83,7 @@ public class MovFnUnitTests extends AggregatorTestCase { Script script = new Script(Script.DEFAULT_SCRIPT_TYPE, "painless", "test", Collections.emptyMap()); DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); - aggBuilder.dateHistogramInterval(DateHistogramInterval.DAY).field(DATE_FIELD); + aggBuilder.calendarInterval(DateHistogramInterval.DAY).field(DATE_FIELD); aggBuilder.subAggregation(new AvgAggregationBuilder("avg").field(VALUE_FIELD)); aggBuilder.subAggregation(new MovFnPipelineAggregationBuilder("mov_fn", "avg", script, 3)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java index bb1faeddd82..27938352ef4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java @@ -132,10 +132,17 @@ public final class ExtractorUtils { throw ExceptionsHelper.badRequestException("ML requires date_histogram.time_zone to be UTC"); } - if (dateHistogram.dateHistogramInterval() != null) { + // TODO retains `dateHistogramInterval()`/`interval()` access for bwc logic, needs updating + if (dateHistogram.getCalendarInterval() != null) { + return validateAndGetCalendarInterval(dateHistogram.getCalendarInterval().toString()); + } else if (dateHistogram.getFixedInterval() != null) { + return dateHistogram.getFixedInterval().estimateMillis(); + } else if (dateHistogram.dateHistogramInterval() != null) { return validateAndGetCalendarInterval(dateHistogram.dateHistogramInterval().toString()); - } else { + } else if (dateHistogram.interval() != 0) { return dateHistogram.interval(); + } else { + throw new IllegalArgumentException("Must specify an interval for DateHistogram"); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java index 93cf0cbeeb3..0fe47d96ffe 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java @@ -146,7 +146,8 @@ public class RollupJobCaps implements Writeable, ToXContentObject { final DateHistogramGroupConfig dateHistogram = groupConfig.getDateHistogram(); final Map dateHistogramAggCap = new HashMap<>(); dateHistogramAggCap.put("agg", DateHistogramAggregationBuilder.NAME); - dateHistogramAggCap.put(DateHistogramGroupConfig.INTERVAL, dateHistogram.getInterval().toString()); + dateHistogramAggCap.put(dateHistogram.getIntervalTypeName(), dateHistogram.getInterval().toString()); + if (dateHistogram.getDelay() != null) { dateHistogramAggCap.put(DateHistogramGroupConfig.DELAY, dateHistogram.getDelay().toString()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java index c9fe0c644a8..4db5966671d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java @@ -50,17 +50,45 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { static final String NAME = "date_histogram"; public static final String INTERVAL = "interval"; - private static final String FIELD = "field"; + public static final String FIXED_INTERVAL = "fixed_interval"; + public static final String CALENDAR_INTERVAL = "calendar_interval"; public static final String TIME_ZONE = "time_zone"; public static final String DELAY = "delay"; - public static final String DEFAULT_TIMEZONE = "UTC"; + + private static final String DEFAULT_TIMEZONE = "UTC"; public static final ZoneId DEFAULT_ZONEID_TIMEZONE = ZoneOffset.UTC; + private static final String FIELD = "field"; + private static final String TYPE_NAME = "interval"; + private static final ConstructingObjectParser PARSER; static { - PARSER = new ConstructingObjectParser<>(NAME, a -> - new DateHistogramGroupConfig((String) a[0], (DateHistogramInterval) a[1], (DateHistogramInterval) a[2], (String) a[3])); + PARSER = new ConstructingObjectParser<>(NAME, a -> { + DateHistogramInterval oldInterval = (DateHistogramInterval) a[1]; + DateHistogramInterval calendarInterval = (DateHistogramInterval) a[2]; + DateHistogramInterval fixedInterval = (DateHistogramInterval) a[3]; + + if (oldInterval != null) { + if (calendarInterval != null || fixedInterval != null) { + throw new IllegalArgumentException("Cannot use [interval] with [fixed_interval] or [calendar_interval] " + + "configuration options."); + } + return fromUnknownTimeUnit((String) a[0], oldInterval, (DateHistogramInterval) a[4], (String) a[5]); + } else if (calendarInterval != null && fixedInterval == null) { + return new CalendarInterval((String) a[0], calendarInterval, (DateHistogramInterval) a[4], (String) a[5]); + } else if (calendarInterval == null && fixedInterval != null) { + return new FixedInterval((String) a[0], fixedInterval, (DateHistogramInterval) a[4], (String) a[5]); + } else if (calendarInterval != null && fixedInterval != null) { + throw new IllegalArgumentException("Cannot set both [fixed_interval] and [calendar_interval] at the same time"); + } else { + throw new IllegalArgumentException("An interval is required. Use [fixed_interval] or [calendar_interval]."); + } + }); PARSER.declareString(constructorArg(), new ParseField(FIELD)); - PARSER.declareField(constructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(INTERVAL), ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(INTERVAL), ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), + new ParseField(CALENDAR_INTERVAL), ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), + new ParseField(FIXED_INTERVAL), ValueType.STRING); PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(DELAY), ValueType.STRING); PARSER.declareString(optionalConstructorArg(), new ParseField(TIME_ZONE)); } @@ -71,8 +99,97 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { private final String timeZone; /** - * Create a new {@link DateHistogramGroupConfig} using the given field and interval parameters. + * FixedInterval is a {@link DateHistogramGroupConfig} that uses a fixed time interval for rolling up data. + * The fixed time interval is one or multiples of SI units and has no calendar-awareness (e.g. doesn't account + * for leap corrections, does not have variable length months, etc). + * + * For calendar-aware rollups, use {@link CalendarInterval} */ + public static class FixedInterval extends DateHistogramGroupConfig { + private static final String TYPE_NAME = "fixed_interval"; + public FixedInterval(String field, DateHistogramInterval interval) { + this(field, interval, null, null); + } + + public FixedInterval(String field, DateHistogramInterval interval, DateHistogramInterval delay, String timeZone) { + super(field, interval, delay, timeZone); + // validate fixed time + TimeValue.parseTimeValue(interval.toString(), NAME + ".FixedInterval"); + } + + FixedInterval(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getIntervalTypeName() { + return TYPE_NAME; + } + } + + /** + * CalendarInterval is a {@link DateHistogramGroupConfig} that uses calendar-aware intervals for rolling up data. + * Calendar time intervals understand leap corrections and contextual differences in certain calendar units (e.g. + * months are variable length depending on the month). Calendar units are only available in singular quantities: + * 1s, 1m, 1h, 1d, 1w, 1q, 1M, 1y + * + * For fixed time rollups, use {@link FixedInterval} + */ + public static class CalendarInterval extends DateHistogramGroupConfig { + private static final String TYPE_NAME = "calendar_interval"; + public CalendarInterval(String field, DateHistogramInterval interval) { + this(field, interval, null, null); + } + + public CalendarInterval(String field, DateHistogramInterval interval, DateHistogramInterval delay, String timeZone) { + super(field, interval, delay, timeZone); + if (DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()) == null) { + throw new IllegalArgumentException("The supplied interval [" + interval +"] could not be parsed " + + "as a calendar interval."); + } + } + + CalendarInterval(StreamInput in) throws IOException { + super(in); + } + + @Override + public String getIntervalTypeName() { + return TYPE_NAME; + } + } + + /** + * This helper can be used to "up-convert" a legacy job date histo config stored with plain "interval" into + * one of the new Fixed or Calendar intervals. It follows the old behavior where the interval is first + * parsed with the calendar logic, and if that fails, it is assumed to be a fixed interval + */ + private static DateHistogramGroupConfig fromUnknownTimeUnit(String field, DateHistogramInterval interval, + DateHistogramInterval delay, String timeZone) { + if (DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()) != null) { + return new CalendarInterval(field, interval, delay, timeZone); + } else { + return new FixedInterval(field, interval, delay, timeZone); + } + } + + static DateHistogramGroupConfig fromUnknownTimeUnit(StreamInput in) throws IOException { + DateHistogramInterval interval = new DateHistogramInterval(in); + String field = in.readString(); + DateHistogramInterval delay = in.readOptionalWriteable(DateHistogramInterval::new); + String timeZone = in.readString(); + return fromUnknownTimeUnit(field, interval, delay, timeZone); + } + + /** + * Create a new {@link DateHistogramGroupConfig} using the given field and interval parameters. + * + * @deprecated Build a DateHistoConfig using {@link DateHistogramGroupConfig.CalendarInterval} + * or {@link DateHistogramGroupConfig.FixedInterval} instead + * + * @since 7.2.0 + */ + @Deprecated public DateHistogramGroupConfig(final String field, final DateHistogramInterval interval) { this(field, interval, null, null); } @@ -89,7 +206,13 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { * @param interval the interval to use for the date histogram (required) * @param delay the time delay (optional) * @param timeZone the id of time zone to use to calculate the date histogram (optional). When {@code null}, the UTC timezone is used. + * + * @deprecated Build a DateHistoConfig using {@link DateHistogramGroupConfig.CalendarInterval} + * or {@link DateHistogramGroupConfig.FixedInterval} instead + * + * @since 7.2.0 */ + @Deprecated public DateHistogramGroupConfig(final String field, final DateHistogramInterval interval, final @Nullable DateHistogramInterval delay, @@ -114,6 +237,13 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { } } + /** + * @deprecated Build a DateHistoConfig using {@link DateHistogramGroupConfig.CalendarInterval} + * or {@link DateHistogramGroupConfig.FixedInterval} instead + * + * @since 7.2.0 + */ + @Deprecated DateHistogramGroupConfig(final StreamInput in) throws IOException { interval = new DateHistogramInterval(in); field = in.readString(); @@ -133,7 +263,7 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { builder.startObject(); { - builder.field(INTERVAL, interval.toString()); + builder.field(getIntervalTypeName(), interval.toString()); builder.field(FIELD, field); if (delay != null) { builder.field(DELAY, delay.toString()); @@ -178,6 +308,10 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { return createRounding(interval.toString(), timeZone); } + public String getIntervalTypeName() { + return TYPE_NAME; + } + public void validateMappings(Map> fieldCapsResponse, ActionRequestValidationException validationException) { @@ -205,7 +339,7 @@ public class DateHistogramGroupConfig implements Writeable, ToXContentObject { if (this == other) { return true; } - if (other == null || getClass() != other.getClass()) { + if (other == null || other instanceof DateHistogramGroupConfig == false) { return false; } final DateHistogramGroupConfig that = (DateHistogramGroupConfig) other; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java index b7c69ecda0e..1fad03473d3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/GroupConfig.java @@ -76,7 +76,7 @@ public class GroupConfig implements Writeable, ToXContentObject { } public GroupConfig(final StreamInput in) throws IOException { - dateHistogram = new DateHistogramGroupConfig(in); + dateHistogram = DateHistogramGroupConfig.fromUnknownTimeUnit(in); histogram = in.readOptionalWriteable(HistogramGroupConfig::new); terms = in.readOptionalWriteable(TermsGroupConfig::new); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java index ba40717959c..383a4a7d62e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsActionResponseTests.java @@ -40,4 +40,6 @@ public class GetDatafeedsActionResponseTests extends AbstractStreamableTestCase< SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); return new NamedWriteableRegistry(searchModule.getNamedWriteables()); } + + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index 71491c92277..6b664777a2d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -70,6 +70,22 @@ import static org.hamcrest.Matchers.not; public class DatafeedConfigTests extends AbstractSerializingTestCase { + @AwaitsFix(bugUrl = "Tests need to be updated to use calendar/fixed interval explicitly") + public void testIntervalWarnings() { + /* + Placeholder test for visibility. Datafeeds use calendar and fixed intervals through the deprecated + methods. The randomized creation + final superclass tests made it impossible to add warning assertions, + so warnings have been disabled on this test. + + When fixed, `enableWarningsCheck()` should be removed. + */ + } + + @Override + protected boolean enableWarningsCheck() { + return false; + } + @Override protected DatafeedConfig createTestInstance() { return createRandomizedDatafeedConfig(randomAlphaOfLength(10)); @@ -110,7 +126,7 @@ public class DatafeedConfigTests extends AbstractSerializingTestCase createDatafeedWithDateHistogram((String) null)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java index 62436172d92..571c9e81a90 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdateTests.java @@ -54,6 +54,22 @@ import static org.hamcrest.Matchers.not; public class DatafeedUpdateTests extends AbstractSerializingTestCase { + @AwaitsFix(bugUrl = "Tests need to be updated to use calendar/fixed interval explicitly") + public void testIntervalWarnings() { + /* + Placeholder test for visibility. Datafeeds use calendar and fixed intervals through the deprecated + methods. The randomized creation + final superclass tests made it impossible to add warning assertions, + so warnings have been disabled on this test. + + When fixed, `enableWarningsCheck()` should be removed. + */ + } + + @Override + protected boolean enableWarningsCheck() { + return false; + } + @Override protected DatafeedUpdate createTestInstance() { return createRandomized(DatafeedConfigTests.randomValidDatafeedId()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java index 532468216e5..6e11728cdab 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; @@ -79,13 +80,26 @@ public class ExtractorUtilsTests extends ESTestCase { () -> ExtractorUtils.getHistogramIntervalMillis(dateHistogram)); assertThat(e.getMessage(), equalTo("ML requires date_histogram.time_zone to be UTC")); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] " + + "or [calendar_interval] in the future."); + } + + public void testGetHistogramIntervalMillis_GivenUtcTimeZonesDeprecated() { + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + ZoneId zone = randomFrom(ZoneOffset.UTC, ZoneId.of("UTC")); + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") + .interval(300000L).timeZone(zone).subAggregation(maxTime); + assertThat(ExtractorUtils.getHistogramIntervalMillis(dateHistogram), is(300_000L)); + + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] " + + "or [calendar_interval] in the future."); } public void testGetHistogramIntervalMillis_GivenUtcTimeZones() { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); ZoneId zone = randomFrom(ZoneOffset.UTC, ZoneId.of("UTC")); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") - .interval(300000L).timeZone(zone).subAggregation(maxTime); + .fixedInterval(new DateHistogramInterval("300000ms")).timeZone(zone).subAggregation(maxTime); assertThat(ExtractorUtils.getHistogramIntervalMillis(dateHistogram), is(300_000L)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java index 605ea6e901a..3535cb1ed55 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.rollup; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; @@ -69,12 +70,33 @@ public class ConfigTestHelpers { public static DateHistogramGroupConfig randomDateHistogramGroupConfig(final Random random) { final String field = randomField(random); - final DateHistogramInterval interval = randomInterval(); final DateHistogramInterval delay = random.nextBoolean() ? randomInterval() : null; - String timezone = random.nextBoolean() ? randomZone().getId() : null; - return new DateHistogramGroupConfig(field, interval, delay, timezone); + final String timezone = random.nextBoolean() ? randomZone().getId() : null; + if (random.nextBoolean()) { + return new DateHistogramGroupConfig.FixedInterval(field, randomInterval(), delay, timezone); + } else { + int i = random.nextInt(DateHistogramAggregationBuilder.DATE_FIELD_UNITS.size()); + List units = new ArrayList<>(DateHistogramAggregationBuilder.DATE_FIELD_UNITS.keySet()); + Collections.shuffle(units, random); + return new DateHistogramGroupConfig.CalendarInterval(field, new DateHistogramInterval(units.get(0)), delay, timezone); + } } + public static DateHistogramGroupConfig randomLegacyDateHistogramGroupConfig(final Random random) { + final String field = randomField(random); + final DateHistogramInterval delay = random.nextBoolean() ? randomInterval() : null; + final String timezone = random.nextBoolean() ? randomZone().getId() : null; + if (random.nextBoolean()) { + return new DateHistogramGroupConfig(field, randomInterval(), delay, timezone); + } else { + int i = random.nextInt(DateHistogramAggregationBuilder.DATE_FIELD_UNITS.size()); + List units = new ArrayList<>(DateHistogramAggregationBuilder.DATE_FIELD_UNITS.keySet()); + Collections.shuffle(units, random); + return new DateHistogramGroupConfig(field, new DateHistogramInterval(units.get(0)), delay, timezone); + } + } + + public static List getFields() { return IntStream.range(0, ESTestCase.randomIntBetween(1, 10)) .mapToObj(n -> ESTestCase.randomAlphaOfLengthBetween(5, 10)) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java index 95df682ff5e..65844e9e1ca 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java @@ -27,6 +27,12 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class DateHistogramGroupConfigSerializingTests extends AbstractSerializingTestCase { + + private enum DateHistoType { + LEGACY, FIXED, CALENDAR + } + private static DateHistoType type; + @Override protected DateHistogramGroupConfig doParseInstance(final XContentParser parser) throws IOException { return DateHistogramGroupConfig.fromXContent(parser); @@ -34,19 +40,33 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin @Override protected Writeable.Reader instanceReader() { + if (type.equals(DateHistoType.FIXED)) { + return DateHistogramGroupConfig.FixedInterval::new; + } else if (type.equals(DateHistoType.CALENDAR)) { + return DateHistogramGroupConfig.CalendarInterval::new; + } return DateHistogramGroupConfig::new; } @Override protected DateHistogramGroupConfig createTestInstance() { - return randomDateHistogramGroupConfig(random()); + DateHistogramGroupConfig config = randomDateHistogramGroupConfig(random()); + if (config.getClass().equals(DateHistogramGroupConfig.FixedInterval.class)) { + type = DateHistoType.FIXED; + } else if (config.getClass().equals(DateHistogramGroupConfig.CalendarInterval.class)) { + type = DateHistoType.CALENDAR; + } else { + type = DateHistoType.LEGACY; + } + return config; } public void testValidateNoMapping() { ActionRequestValidationException e = new ActionRequestValidationException(); Map> responseMap = new HashMap<>(); - DateHistogramGroupConfig config = new DateHistogramGroupConfig("my_field", new DateHistogramInterval("1d"), null, null); + DateHistogramGroupConfig config = new DateHistogramGroupConfig.CalendarInterval("my_field", + new DateHistogramInterval("1d"), null, null); config.validateMappings(responseMap, e); assertThat(e.validationErrors().get(0), equalTo("Could not find a [date] field with name [my_field] in any of the " + "indices matching the index pattern.")); @@ -60,7 +80,8 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin FieldCapabilities fieldCaps = mock(FieldCapabilities.class); responseMap.put("some_other_field", Collections.singletonMap("date", fieldCaps)); - DateHistogramGroupConfig config = new DateHistogramGroupConfig("my_field", new DateHistogramInterval("1d"), null, null); + DateHistogramGroupConfig config = new DateHistogramGroupConfig.CalendarInterval("my_field", + new DateHistogramInterval("1d"), null, null); config.validateMappings(responseMap, e); assertThat(e.validationErrors().get(0), equalTo("Could not find a [date] field with name [my_field] in any of the " + "indices matching the index pattern.")); @@ -74,7 +95,8 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin FieldCapabilities fieldCaps = mock(FieldCapabilities.class); responseMap.put("my_field", Collections.singletonMap("keyword", fieldCaps)); - DateHistogramGroupConfig config = new DateHistogramGroupConfig("my_field", new DateHistogramInterval("1d"), null, null); + DateHistogramGroupConfig config = new DateHistogramGroupConfig.CalendarInterval("my_field", + new DateHistogramInterval("1d"), null, null); config.validateMappings(responseMap, e); assertThat(e.validationErrors().get(0), equalTo("The field referenced by a date_histo group must be a [date] type across all " + "indices in the index pattern. Found: [keyword] for field [my_field]")); @@ -91,7 +113,8 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin types.put("keyword", fieldCaps); responseMap.put("my_field", types); - DateHistogramGroupConfig config = new DateHistogramGroupConfig("my_field", new DateHistogramInterval("1d"), null, null); + DateHistogramGroupConfig config = new DateHistogramGroupConfig.CalendarInterval("my_field", + new DateHistogramInterval("1d"), null, null); config.validateMappings(responseMap, e); assertThat(e.validationErrors().get(0), equalTo("The field referenced by a date_histo group must be a [date] type across all " + "indices in the index pattern. Found: [date, keyword] for field [my_field]")); @@ -106,7 +129,8 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin when(fieldCaps.isAggregatable()).thenReturn(false); responseMap.put("my_field", Collections.singletonMap("date", fieldCaps)); - DateHistogramGroupConfig config =new DateHistogramGroupConfig("my_field", new DateHistogramInterval("1d"), null, null); + DateHistogramGroupConfig config =new DateHistogramGroupConfig.CalendarInterval("my_field", + new DateHistogramInterval("1d"), null, null); config.validateMappings(responseMap, e); assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not.")); } @@ -120,7 +144,8 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin when(fieldCaps.isAggregatable()).thenReturn(true); responseMap.put("my_field", Collections.singletonMap("date", fieldCaps)); - DateHistogramGroupConfig config = new DateHistogramGroupConfig("my_field", new DateHistogramInterval("1d"), null, null); + DateHistogramGroupConfig config = new DateHistogramGroupConfig.CalendarInterval("my_field", + new DateHistogramInterval("1d"), null, null); config.validateMappings(responseMap, e); assertThat(e.validationErrors().size(), equalTo(0)); } @@ -134,7 +159,8 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin when(fieldCaps.isAggregatable()).thenReturn(true); responseMap.put("my_field", Collections.singletonMap("date", fieldCaps)); - DateHistogramGroupConfig config = new DateHistogramGroupConfig("my_field", new DateHistogramInterval("1w"), null, null); + DateHistogramGroupConfig config = new DateHistogramGroupConfig.CalendarInterval("my_field", + new DateHistogramInterval("1w"), null, null); config.validateMappings(responseMap, e); assertThat(e.validationErrors().size(), equalTo(0)); } @@ -145,7 +171,7 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin */ public void testBwcSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { - final DateHistogramGroupConfig reference = ConfigTestHelpers.randomDateHistogramGroupConfig(random()); + final DateHistogramGroupConfig reference = ConfigTestHelpers.randomLegacyDateHistogramGroupConfig(random()); final BytesStreamOutput out = new BytesStreamOutput(); reference.writeTo(out); @@ -179,4 +205,44 @@ public class DateHistogramGroupConfigSerializingTests extends AbstractSerializin assertEqualInstances(new DateHistogramGroupConfig(field, interval, delay, timezone.getId()), deserialized); } } + + /** + * Tests that old DateHistogramGroupConfigs can be serialized/deserialized + * into the specialized Fixed/Calendar versions + */ + public void testLegacyConfigBWC() throws IOException { + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + // Serialize the old format + final DateHistogramGroupConfig reference = ConfigTestHelpers.randomLegacyDateHistogramGroupConfig(random()); + + final BytesStreamOutput out = new BytesStreamOutput(); + reference.writeTo(out); + final StreamInput in = out.bytes().streamInput(); + + // Deserialize the new format + DateHistogramGroupConfig test = DateHistogramGroupConfig.fromUnknownTimeUnit(in); + + assertThat(reference.getInterval(), equalTo(test.getInterval())); + assertThat(reference.getField(), equalTo(test.getField())); + assertThat(reference.getTimeZone(), equalTo(test.getTimeZone())); + assertThat(reference.getDelay(), equalTo(test.getDelay())); + } + + for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { + // Serialize the new format + final DateHistogramGroupConfig reference = ConfigTestHelpers.randomDateHistogramGroupConfig(random()); + + final BytesStreamOutput out = new BytesStreamOutput(); + reference.writeTo(out); + final StreamInput in = out.bytes().streamInput(); + + // Deserialize the old format + DateHistogramGroupConfig test = new DateHistogramGroupConfig(in); + + assertThat(reference.getInterval(), equalTo(test.getInterval())); + assertThat(reference.getField(), equalTo(test.getField())); + assertThat(reference.getTimeZone(), equalTo(test.getTimeZone())); + assertThat(reference.getDelay(), equalTo(test.getDelay())); + } + } } diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 9169ad5d93f..770eaec7bd1 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -226,10 +226,14 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + "}"; createDataframeTransformRequest.setJsonEntity(config); + createDataframeTransformRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + + "use [fixed_interval] or [calendar_interval] in the future.")); + Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS, + "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); assertTrue(indexExists(dataFrameIndex)); Map indexStats = getAsMap(dataFrameIndex + "/_stats"); @@ -257,6 +261,9 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + " } } } }" + "}"; createPreviewRequest.setJsonEntity(config); + createPreviewRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + + "use [fixed_interval] or [calendar_interval] in the future.")); + Map previewDataframeResponse = entityAsMap(client().performRequest(createPreviewRequest)); List> preview = (List>)previewDataframeResponse.get("preview"); // preview is limited to 100 @@ -298,10 +305,13 @@ public class DataFramePivotRestIT extends DataFrameRestTestCase { + "}"; createDataframeTransformRequest.setJsonEntity(config); + createDataframeTransformRequest.setOptions(expectWarnings("[interval] on [date_histogram] is deprecated, " + + "use [fixed_interval] or [calendar_interval] in the future.")); Map createDataframeTransformResponse = entityAsMap(client().performRequest(createDataframeTransformRequest)); assertThat(createDataframeTransformResponse.get("acknowledged"), equalTo(Boolean.TRUE)); - startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS); + startAndWaitForTransform(transformId, dataFrameIndex, BASIC_AUTH_VALUE_DATA_FRAME_ADMIN_WITH_SOME_DATA_ACCESS, + "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); assertTrue(indexExists(dataFrameIndex)); // we expect 21 documents as there shall be 21 days worth of docs diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index 2de6a248858..baa1cbf678f 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -182,10 +182,13 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { startDataframeTransform(transformId, force, null); } - protected void startDataframeTransform(String transformId, boolean force, String authHeader) throws IOException { + protected void startDataframeTransform(String transformId, boolean force, String authHeader, String... warnings) throws IOException { // start the transform final Request startTransformRequest = createRequestWithAuth("POST", DATAFRAME_ENDPOINT + transformId + "/_start", authHeader); startTransformRequest.addParameter(DataFrameField.FORCE.getPreferredName(), Boolean.toString(force)); + if (warnings.length > 0) { + startTransformRequest.setOptions(expectWarnings(warnings)); + } Map startTransformResponse = entityAsMap(client().performRequest(startTransformRequest)); assertThat(startTransformResponse.get("started"), equalTo(Boolean.TRUE)); } @@ -204,8 +207,13 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { } protected void startAndWaitForTransform(String transformId, String dataFrameIndex, String authHeader) throws Exception { + startAndWaitForTransform(transformId, dataFrameIndex, authHeader, new String[0]); + } + + protected void startAndWaitForTransform(String transformId, String dataFrameIndex, + String authHeader, String... warnings) throws Exception { // start the transform - startDataframeTransform(transformId, false, authHeader); + startDataframeTransform(transformId, false, authHeader, warnings); assertTrue(indexExists(dataFrameIndex)); // wait until the dataframe has been created and all data is available waitForDataFrameCheckpoint(transformId); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java index 5c1c61a5265..20ea84502ed 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/PivotTests.java @@ -83,6 +83,16 @@ public class PivotTests extends ESTestCase { return namedXContentRegistry; } + + /* + Had to disable warnings because tests get random date histo configs, and changing to + new interval format was non-trivial. Best for ML team to fix + */ + @Override + protected boolean enableWarningsCheck() { + return false; + } + public void testValidateExistingIndex() throws Exception { SourceConfig source = new SourceConfig(new String[]{"existing_source_index"}, QueryConfig.matchAll()); Pivot pivot = new Pivot(getValidPivotConfig()); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java index 8c5f5cf1e39..426b58f6864 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/DatafeedJobsRestIT.java @@ -391,7 +391,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " \"groups\" : {\n" + " \"date_histogram\": {\n" + " \"field\": \"time stamp\",\n" - + " \"interval\": \"2m\",\n" + + " \"fixed_interval\": \"2m\",\n" + " \"delay\": \"7d\"\n" + " },\n" + " \"terms\": {\n" @@ -412,7 +412,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { client().performRequest(createRollupRequest); String datafeedId = "datafeed-" + jobId; - String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":3600000}," + String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"fixed_interval\":\"3600000ms\"}," + "\"aggregations\":{" + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}"; @@ -524,7 +524,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; - String aggregations = "{\"time stamp\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"1h\"}," + String aggregations = "{\"time stamp\":{\"date_histogram\":{\"field\":\"time stamp\",\"calendar_interval\":\"1h\"}," + "\"aggregations\":{" + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"airline\":{\"terms\":{\"field\":\"airline\",\"size\":10}," @@ -564,7 +564,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { String datafeedId = "datafeed-" + jobId; String aggregations = "{\"hostname\": {\"terms\" : {\"field\": \"host.keyword\", \"size\":10}," - + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"interval\":\"60s\"}," + + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"fixed_interval\":\"60s\"}," + "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}}," + "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}}," + "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; @@ -610,7 +610,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { String datafeedId = "datafeed-" + jobId; String aggregations = "{\"hostname\": {\"terms\" : {\"field\": \"host.keyword\", \"size\":10}," - + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"interval\":\"5s\"}," + + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"fixed_interval\":\"5s\"}," + "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}}," + "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}}," + "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; @@ -652,7 +652,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { String datafeedId = "datafeed-" + jobId; String aggregations = "{\"hostname\": {\"terms\" : {\"field\": \"host.keyword\", \"size\":10}," - + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"interval\":\"5s\"}," + + "\"aggs\": {\"buckets\": {\"date_histogram\":{\"field\":\"timestamp\",\"fixed_interval\":\"5s\"}," + "\"aggs\": {\"timestamp\":{\"max\":{\"field\":\"timestamp\"}}," + "\"bytes-delta\":{\"derivative\":{\"buckets_path\":\"avg_bytes_out\"}}," + "\"avg_bytes_out\":{\"avg\":{\"field\":\"network_bytes_out\"}} }}}}}"; @@ -706,7 +706,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; - String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"15m\"}," + String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"fixed_interval\":\"15m\"}," + "\"aggregations\":{" + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"airlines\":{\"terms\":{\"field\":\"airline.keyword\",\"size\":10}}," @@ -759,7 +759,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " \"groups\" : {\n" + " \"date_histogram\": {\n" + " \"field\": \"time stamp\",\n" - + " \"interval\": \"2m\",\n" + + " \"fixed_interval\": \"2m\",\n" + " \"delay\": \"7d\"\n" + " },\n" + " \"terms\": {\n" @@ -797,7 +797,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { client().performRequest(refreshRollupIndex); String datafeedId = "datafeed-" + jobId; - String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":3600000}," + String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"fixed_interval\":\"3600000ms\"}," + "\"aggregations\":{" + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}"; @@ -844,7 +844,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { + " \"groups\" : {\n" + " \"date_histogram\": {\n" + " \"field\": \"time stamp\",\n" - + " \"interval\": \"2m\",\n" + + " \"fixed_interval\": \"2m\",\n" + " \"delay\": \"7d\"\n" + " },\n" + " \"terms\": {\n" @@ -865,7 +865,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { client().performRequest(createRollupRequest); String datafeedId = "datafeed-" + jobId; - String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":3600000}," + String aggregations = "{\"buckets\":{\"date_histogram\":{\"field\":\"time stamp\",\"fixed_interval\":\"3600000ms\"}," + "\"aggregations\":{" + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"responsetime\":{\"avg\":{\"field\":\"responsetime\"}}}}}"; @@ -914,7 +914,7 @@ public class DatafeedJobsRestIT extends ESRestTestCase { client().performRequest(createJobRequest); String datafeedId = "datafeed-" + jobId; - String aggregations = "{\"time stamp\":{\"date_histogram\":{\"field\":\"time stamp\",\"interval\":\"1h\"}," + String aggregations = "{\"time stamp\":{\"date_histogram\":{\"field\":\"time stamp\",\"calendar_interval\":\"1h\"}," + "\"aggregations\":{" + "\"time stamp\":{\"max\":{\"field\":\"time stamp\"}}," + "\"airlineFilter\":{\"filter\":{\"term\": {\"airline\":\"AAA\"}}," diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java index 744076320b6..e3af3b7ac64 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetOverallBucketsAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.Min; @@ -278,7 +279,7 @@ public class TransportGetOverallBucketsAction extends HandledTransportAction checkCurrentBucketEventCount(long start, long end) { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder() .size(0) - .aggregation(new DateHistogramAggregationBuilder(DATE_BUCKETS).interval(bucketSpan).field(timeField)) + .aggregation(new DateHistogramAggregationBuilder(DATE_BUCKETS) + .fixedInterval(new DateHistogramInterval(bucketSpan + "ms")).field(timeField)) .query(ExtractorUtils.wrapInTimeRangeQuery(datafeedQuery, timeField, start, end)); SearchRequest searchRequest = new SearchRequest(datafeedIndices).source(searchSourceBuilder); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java index 8264d3e15fd..7a66ff49d62 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/RollupDataExtractorFactory.java @@ -207,7 +207,16 @@ public class RollupDataExtractorFactory implements DataExtractorFactory { if (datehistogramAgg == null) { return null; } - return (String)datehistogramAgg.get(DateHistogramGroupConfig.INTERVAL); + if (datehistogramAgg.get(DateHistogramGroupConfig.INTERVAL) != null) { + return (String)datehistogramAgg.get(DateHistogramGroupConfig.INTERVAL); + } + if (datehistogramAgg.get(DateHistogramGroupConfig.CALENDAR_INTERVAL) != null) { + return (String)datehistogramAgg.get(DateHistogramGroupConfig.CALENDAR_INTERVAL); + } + if (datehistogramAgg.get(DateHistogramGroupConfig.FIXED_INTERVAL) != null) { + return (String)datehistogramAgg.get(DateHistogramGroupConfig.FIXED_INTERVAL); + } + return null; } private String getTimezone() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java index dee28e71a7b..ed24af15962 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/DataExtractorFactoryTests.java @@ -214,7 +214,10 @@ public class DataExtractorFactoryTests extends ESTestCase { datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); ActionListener listener = ActionListener.wrap( - dataExtractorFactory -> assertThat(dataExtractorFactory, instanceOf(RollupDataExtractorFactory.class)), + dataExtractorFactory -> { + assertThat(dataExtractorFactory, instanceOf(RollupDataExtractorFactory.class)); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + }, e -> fail() ); DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); @@ -234,7 +237,10 @@ public class DataExtractorFactoryTests extends ESTestCase { datafeedConfig.setParsedAggregations(AggregatorFactories.builder().addAggregator( AggregationBuilders.dateHistogram("time").interval(600_000).subAggregation(maxTime).subAggregation(myTerm).field("time"))); ActionListener listener = ActionListener.wrap( - dataExtractorFactory -> assertThat(dataExtractorFactory, instanceOf(ChunkedDataExtractorFactory.class)), + dataExtractorFactory -> { + assertThat(dataExtractorFactory, instanceOf(ChunkedDataExtractorFactory.class)); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); + }, e -> fail() ); DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); @@ -280,6 +286,7 @@ public class DataExtractorFactoryTests extends ESTestCase { containsString("Rollup capabilities do not have a [date_histogram] aggregation with an interval " + "that is a multiple of the datafeed's interval.")); assertThat(e, instanceOf(IllegalArgumentException.class)); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } ); DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); @@ -304,6 +311,7 @@ public class DataExtractorFactoryTests extends ESTestCase { assertThat(e.getMessage(), containsString("Rollup capabilities do not support all the datafeed aggregations at the desired interval.")); assertThat(e, instanceOf(IllegalArgumentException.class)); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } ); DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); @@ -328,6 +336,7 @@ public class DataExtractorFactoryTests extends ESTestCase { assertThat(e.getMessage(), containsString("Rollup capabilities do not support all the datafeed aggregations at the desired interval.")); assertThat(e, instanceOf(IllegalArgumentException.class)); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future."); } ); DataExtractorFactory.create(client, datafeedConfig.build(), jobBuilder.build(new Date()), xContentRegistry(), listener); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java index 95b5069edcf..8c2a0672678 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java @@ -25,6 +25,10 @@ import java.util.List; import java.util.Map; import java.util.Set; +import static org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig.CALENDAR_INTERVAL; +import static org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig.FIXED_INTERVAL; +import static org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig.INTERVAL; + /** * This class contains utilities to identify which jobs are the "best" for a given aggregation tree. * It allows the caller to pass in a set of possible rollup job capabilities and get in return @@ -87,8 +91,7 @@ public class RollupJobIdentifierUtils { /** * Find the set of date_histo's with the largest granularity interval */ - private static void checkDateHisto(DateHistogramAggregationBuilder source, List jobCaps, - Set bestCaps) { + private static void checkDateHisto(DateHistogramAggregationBuilder source, List jobCaps, Set bestCaps) { ArrayList localCaps = new ArrayList<>(); for (RollupJobCaps cap : jobCaps) { RollupJobCaps.RollupFieldCaps fieldCaps = cap.getFieldCaps().get(source.field()); @@ -106,21 +109,102 @@ public class RollupJobIdentifierUtils { if (thisTimezone.getRules().equals(sourceTimeZone.getRules()) == false) { continue; } - if (source.dateHistogramInterval() != null) { - // Check if both are calendar and validate if they are. - // If not, check if both are fixed and validate - if (validateCalendarInterval(source.dateHistogramInterval(), interval)) { + + /* + This is convoluted, but new + legacy intervals makes for a big pattern match. + We have to match up date_histo [interval, fixed_interval, calendar_interval] with + rollup config [interval, fixed_interval, calendar_interval] + + To keep rightward drift to a minimum we break out of the loop if a successful match is found + */ + + DateHistogramInterval configCalendarInterval = agg.get(CALENDAR_INTERVAL) != null + ? new DateHistogramInterval((String) agg.get(CALENDAR_INTERVAL)) : null; + DateHistogramInterval configFixedInterval = agg.get(FIXED_INTERVAL) != null + ? new DateHistogramInterval((String) agg.get(FIXED_INTERVAL)) : null; + DateHistogramInterval configLegacyInterval = agg.get(INTERVAL) != null + ? new DateHistogramInterval((String) agg.get(INTERVAL)) : null; + + // If histo used calendar_interval explicitly + if (source.getCalendarInterval() != null) { + DateHistogramInterval requestInterval = source.getCalendarInterval(); + + // Try to use explicit calendar_interval on config if it exists + if (validateCalendarInterval(requestInterval, configCalendarInterval)) { localCaps.add(cap); - } else if (validateFixedInterval(source.dateHistogramInterval(), interval)) { + break; + } + + // Otherwise fall back to old style where we prefer calendar over fixed (e.g. `1h` == calendar) + if (validateCalendarInterval(requestInterval, configLegacyInterval)) { localCaps.add(cap); + break; + } + + // Note that this ignores FIXED_INTERVAL on purpose, it would not be compatible + + } else if (source.getFixedInterval() != null) { + // If histo used fixed_interval explicitly + + DateHistogramInterval requestInterval = source.getFixedInterval(); + + // Try to use explicit fixed_interval on config if it exists + if (validateFixedInterval(requestInterval, configFixedInterval)) { + localCaps.add(cap); + break; + } + + // Otherwise fall back to old style + if (validateFixedInterval(requestInterval, configLegacyInterval)) { + localCaps.add(cap); + break; + } + + // Note that this ignores CALENDER_INTERVAL on purpose, it would not be compatible + + } else if (source.dateHistogramInterval() != null) { + // The histo used a deprecated interval method, so meaning is ambiguous. + // Use legacy method of preferring calendar over fixed + final DateHistogramInterval requestInterval = source.dateHistogramInterval(); + + // Try to use explicit calendar_interval on config if it exists + // Both must be calendar intervals + if (validateCalendarInterval(requestInterval, configCalendarInterval)) { + localCaps.add(cap); + break; + } + + // Otherwise fall back to old style where we prefer calendar over fixed (e.g. `1h` == calendar) + // Need to verify that the config interval is in fact calendar here + if (isCalendarInterval(configLegacyInterval) + && validateCalendarInterval(requestInterval, configLegacyInterval)) { + + localCaps.add(cap); + break; + } + + // The histo's interval couldn't be parsed as a calendar, so it is assumed fixed. + // Try to use explicit fixed_interval on config if it exists + if (validateFixedInterval(requestInterval, configFixedInterval)) { + localCaps.add(cap); + break; + } + + } else if (source.interval() != 0) { + // Otherwise fall back to old style interval millis + // Need to verify that the config interval is not calendar here + if (isCalendarInterval(configLegacyInterval) == false + && validateFixedInterval(new DateHistogramInterval(source.interval() + "ms"), configLegacyInterval)) { + + localCaps.add(cap); + break; } } else { - // check if config is fixed and validate if it is - if (validateFixedInterval(source.interval(), interval)) { - localCaps.add(cap); - } + // This _should not_ happen, but if miraculously it does we need to just quit + throw new IllegalArgumentException("An interval of some variety must be configured on " + + "the date_histogram aggregation."); } - // not a candidate if we get here + // If we get here nothing matched, and we can break out break; } } @@ -141,32 +225,50 @@ public class RollupJobIdentifierUtils { } } + static String retrieveInterval(Map agg) { + String interval = (String) agg.get(RollupField.INTERVAL); + if (interval == null) { + interval = (String) agg.get(CALENDAR_INTERVAL); + } + if (interval == null) { + interval = (String) agg.get(FIXED_INTERVAL); + } + if (interval == null) { + throw new IllegalStateException("Could not find interval in agg cap: " + agg.toString()); + } + return interval; + } + private static boolean isCalendarInterval(DateHistogramInterval interval) { - return DateHistogramAggregationBuilder.DATE_FIELD_UNITS.containsKey(interval.toString()); + return interval != null && DateHistogramAggregationBuilder.DATE_FIELD_UNITS.containsKey(interval.toString()); } static boolean validateCalendarInterval(DateHistogramInterval requestInterval, DateHistogramInterval configInterval) { - // Both must be calendar intervals - if (isCalendarInterval(requestInterval) == false || isCalendarInterval(configInterval) == false) { + if (requestInterval == null || configInterval == null) { return false; } // The request must be gte the config. The CALENDAR_ORDERING map values are integers representing // relative orders between the calendar units Rounding.DateTimeUnit requestUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(requestInterval.toString()); - long requestOrder = requestUnit.getField().getBaseUnit().getDuration().toMillis(); + if (requestUnit == null) { + return false; + } Rounding.DateTimeUnit configUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(configInterval.toString()); + if (configUnit == null) { + return false; + } + + long requestOrder = requestUnit.getField().getBaseUnit().getDuration().toMillis(); long configOrder = configUnit.getField().getBaseUnit().getDuration().toMillis(); // All calendar units are multiples naturally, so we just care about gte return requestOrder >= configOrder; } - static boolean validateFixedInterval(DateHistogramInterval requestInterval, - DateHistogramInterval configInterval) { - // Neither can be calendar intervals - if (isCalendarInterval(requestInterval) || isCalendarInterval(configInterval)) { + static boolean validateFixedInterval(DateHistogramInterval requestInterval, DateHistogramInterval configInterval) { + if (requestInterval == null || configInterval == null) { return false; } @@ -180,18 +282,6 @@ public class RollupJobIdentifierUtils { return requestIntervalMillis >= configIntervalMillis && requestIntervalMillis % configIntervalMillis == 0; } - static boolean validateFixedInterval(long requestInterval, DateHistogramInterval configInterval) { - // config must not be a calendar interval - if (isCalendarInterval(configInterval)) { - return false; - } - long configIntervalMillis = TimeValue.parseTimeValue(configInterval.toString(), - "date_histo.config.interval").getMillis(); - - // Must be a multiple and gte the config - return requestInterval >= configIntervalMillis && requestInterval % configIntervalMillis == 0; - } - /** * Find the set of histo's with the largest interval */ @@ -202,7 +292,7 @@ public class RollupJobIdentifierUtils { if (fieldCaps != null) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) { - Long interval = (long)agg.get(RollupField.INTERVAL); + long interval = (long) agg.get(RollupField.INTERVAL); // query interval must be gte the configured interval, and a whole multiple if (interval <= source.interval() && source.interval() % interval == 0) { localCaps.add(cap); @@ -324,7 +414,7 @@ public class RollupJobIdentifierUtils { for (RollupJobCaps.RollupFieldCaps fieldCaps : o1.getFieldCaps().values()) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - thisTime = getMillisFixedOrCalendar((String) agg.get(RollupField.INTERVAL)); + thisTime = new DateHistogramInterval(retrieveInterval(agg)).estimateMillis(); } else if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) { thisHistoWeights += (long) agg.get(RollupField.INTERVAL); counter += 1; @@ -340,7 +430,7 @@ public class RollupJobIdentifierUtils { for (RollupJobCaps.RollupFieldCaps fieldCaps : o2.getFieldCaps().values()) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - thatTime = getMillisFixedOrCalendar((String) agg.get(RollupField.INTERVAL)); + thatTime = new DateHistogramInterval(retrieveInterval(agg)).estimateMillis(); } else if (agg.get(RollupField.AGG).equals(HistogramAggregationBuilder.NAME)) { thatHistoWeights += (long) agg.get(RollupField.INTERVAL); counter += 1; @@ -385,14 +475,4 @@ public class RollupJobIdentifierUtils { // coverage }; } - - static long getMillisFixedOrCalendar(String value) { - DateHistogramInterval interval = new DateHistogramInterval(value); - if (isCalendarInterval(interval)) { - Rounding.DateTimeUnit intervalUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()); - return intervalUnit.getField().getBaseUnit().getDuration().toMillis(); - } else { - return TimeValue.parseTimeValue(value, "date_histo.comparator.interval").getMillis(); - } - } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java index 7cf8f8d1293..b610dca4508 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupRequestTranslator.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; @@ -60,7 +61,7 @@ public class RollupRequestTranslator { * "the_histo": { * "date_histogram" : { * "field" : "ts", - * "interval" : "1d" + * "calendar_interval" : "1d" * }, * "aggs": { * "the_max": { @@ -93,7 +94,7 @@ public class RollupRequestTranslator { * "the_histo" : { * "date_histogram" : { * "field" : "ts.date_histogram.timestamp", - * "interval" : "1d" + * "calendar_interval" : "1d" * }, * "aggregations" : { * "the_histo._count" : { @@ -150,7 +151,7 @@ public class RollupRequestTranslator { * "the_histo": { * "date_histogram" : { * "field" : "ts", - * "interval" : "day" + * "calendar_interval" : "day" * } * } * } @@ -199,10 +200,16 @@ public class RollupRequestTranslator { DateHistogramAggregationBuilder rolledDateHisto = new DateHistogramAggregationBuilder(source.getName()); - if (source.dateHistogramInterval() != null) { + if (source.getCalendarInterval() != null) { + rolledDateHisto.calendarInterval(source.getCalendarInterval()); + } else if (source.getFixedInterval() != null) { + rolledDateHisto.fixedInterval(source.getFixedInterval()); + } else if (source.dateHistogramInterval() != null) { + // We have to fall back to deprecated interval because we're not sure if this is fixed or cal rolledDateHisto.dateHistogramInterval(source.dateHistogramInterval()); } else { - rolledDateHisto.interval(source.interval()); + // if interval() was used we know it is fixed and can upgrade + rolledDateHisto.fixedInterval(new DateHistogramInterval(source.interval() + "ms")); } ZoneId timeZone = source.timeZone() == null ? DateHistogramGroupConfig.DEFAULT_ZONEID_TIMEZONE : source.timeZone(); diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index b60a37d3fa4..5a16be1456a 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -237,7 +237,13 @@ public abstract class RollupIndexer extends AsyncTwoPhaseIndexer UNITS = new ArrayList<>(DateHistogramAggregationBuilder.DATE_FIELD_UNITS.keySet()); public void testOneMatch() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()); + .calendarInterval(job.getGroupConfig().getDateHistogram().getInterval()); Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); assertThat(bestCaps.size(), equalTo(1)); } public void testBiggerButCompatibleInterval() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1d")); + .calendarInterval(new DateHistogramInterval("1d")); Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); assertThat(bestCaps.size(), equalTo(1)); } public void testBiggerButCompatibleFixedInterval() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100s"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.FixedInterval("foo", new DateHistogramInterval("100s"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1000s")); + .fixedInterval(new DateHistogramInterval("1000s")); + + Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); + assertThat(bestCaps.size(), equalTo(1)); + } + + public void testBiggerButCompatibleFixedIntervalInCalFormat() { + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.FixedInterval("foo", new DateHistogramInterval("1h"))); + final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); + RollupJobCaps cap = new RollupJobCaps(job); + Set caps = singletonSet(cap); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") + .fixedInterval(new DateHistogramInterval("7d")); Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); assertThat(bestCaps.size(), equalTo(1)); } public void testBiggerButCompatibleFixedMillisInterval() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("100ms"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.FixedInterval("foo", new DateHistogramInterval("100ms"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .interval(1000); + .fixedInterval(new DateHistogramInterval("1000ms")); Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); assertThat(bestCaps.size(), equalTo(1)); } public void testIncompatibleInterval() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1d"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1h")); + .calendarInterval(new DateHistogramInterval("1h")); RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " + @@ -110,13 +123,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testIncompatibleFixedCalendarInterval() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("5d"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.FixedInterval("foo", new DateHistogramInterval("5d"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("day")); + .calendarInterval(new DateHistogramInterval("day")); RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " + @@ -124,13 +137,14 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testBadTimeZone() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "CET")); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"), + null, "CET")); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1h")) + .calendarInterval(new DateHistogramInterval("1h")) .timeZone(ZoneOffset.UTC); RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); @@ -139,7 +153,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testMetricOnlyAgg() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final List metrics = singletonList(new MetricConfig("bar", singletonList("max"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, metrics, null); RollupJobCaps cap = new RollupJobCaps(job); @@ -152,13 +166,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testOneOfTwoMatchingCaps() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1h")) + .calendarInterval(new DateHistogramInterval("1h")) .subAggregation(new MaxAggregationBuilder("the_max").field("bar")); RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); @@ -167,20 +181,20 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testTwoJobsSameRollupIndex() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = new HashSet<>(2); caps.add(cap); - final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job2 = new RollupJobConfig("foo2", "index", job.getRollupIndex(), "*/5 * * * * ?", 10, group2, emptyList(), null); RollupJobCaps cap2 = new RollupJobCaps(job2); caps.add(cap2); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1h")); + .calendarInterval(new DateHistogramInterval("1h")); Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); @@ -189,7 +203,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testTwoJobsButBothPartialMatches() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final List metrics = singletonList(new MetricConfig("bar", singletonList("max"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, metrics, null); RollupJobCaps cap = new RollupJobCaps(job); @@ -202,7 +216,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { caps.add(cap2); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1h")) + .calendarInterval(new DateHistogramInterval("1h")) .subAggregation(new MaxAggregationBuilder("the_max").field("bar")) // <-- comes from job1 .subAggregation(new MinAggregationBuilder("the_min").field("bar")); // <-- comes from job2 @@ -212,17 +226,17 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testComparableDifferentDateIntervals() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); - final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1d"))); final RollupJobConfig job2 = new RollupJobConfig("foo2", "index", job.getRollupIndex(), "*/5 * * * * ?", 10, group2, emptyList(), null); RollupJobCaps cap2 = new RollupJobCaps(job2); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1d")); + .calendarInterval(new DateHistogramInterval("1d")); Set caps = new HashSet<>(2); caps.add(cap); @@ -234,17 +248,17 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testComparableDifferentDateIntervalsOnlyOneWorks() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); - final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"))); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1d"))); final RollupJobConfig job2 = new RollupJobConfig("foo2", "index", job.getRollupIndex(), "*/5 * * * * ?", 10, group2, emptyList(), null); RollupJobCaps cap2 = new RollupJobCaps(job2); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1h")); + .calendarInterval(new DateHistogramInterval("1h")); Set caps = new HashSet<>(2); caps.add(cap); @@ -256,18 +270,19 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testComparableNoHistoVsHisto() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); final HistogramGroupConfig histoConfig = new HistogramGroupConfig(100L, "bar"); - final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), histoConfig, null); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h")), + histoConfig, null); final RollupJobConfig job2 = new RollupJobConfig("foo2", "index", job.getRollupIndex(), "*/5 * * * * ?", 10, group2, emptyList(), null); RollupJobCaps cap2 = new RollupJobCaps(job2); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1h")) + .calendarInterval(new DateHistogramInterval("1h")) .subAggregation(new HistogramAggregationBuilder("histo").field("bar").interval(100)); Set caps = new HashSet<>(2); @@ -280,18 +295,19 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testComparableNoTermsVsTerms() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); final TermsGroupConfig termsConfig = new TermsGroupConfig("bar"); - final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, termsConfig); + final GroupConfig group2 = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h")), + null, termsConfig); final RollupJobConfig job2 = new RollupJobConfig("foo2", "index", job.getRollupIndex(), "*/5 * * * * ?", 10, group2, emptyList(), null); RollupJobCaps cap2 = new RollupJobCaps(job2); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(new DateHistogramInterval("1h")) + .calendarInterval(new DateHistogramInterval("1h")) .subAggregation(new TermsAggregationBuilder("histo", ValueType.STRING).field("bar")); Set caps = new HashSet<>(2); @@ -312,7 +328,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { final GroupConfig group = new GroupConfig( // NOTE same name but wrong type - new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name null ); @@ -329,13 +345,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testMissingDateHisto() { DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); - histo.dateHistogramInterval(new DateHistogramInterval("1d")) + histo.calendarInterval(new DateHistogramInterval("1d")) .field("other_field") .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); final GroupConfig group = new GroupConfig( - new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()) + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()) ); final List metrics = Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))); @@ -350,14 +366,14 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testNoMatchingInterval() { DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); - histo.interval(1) + histo.fixedInterval(new DateHistogramInterval("1ms")) .field("foo") .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); final GroupConfig group = new GroupConfig( // interval in job is much higher than agg interval above - new DateHistogramGroupConfig("foo", new DateHistogramInterval("100d"), null, DateTimeZone.UTC.getID()) + new DateHistogramGroupConfig.FixedInterval("foo", new DateHistogramInterval("100d"), null, DateTimeZone.UTC.getID()) ); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); Set caps = singletonSet(new RollupJobCaps(job)); @@ -369,14 +385,14 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testDateHistoMissingFieldInCaps() { DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); - histo.dateHistogramInterval(new DateHistogramInterval("1d")) + histo.calendarInterval(new DateHistogramInterval("1d")) .field("foo") .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); final GroupConfig group = new GroupConfig( // NOTE different field from the one in the query - new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()) + new DateHistogramGroupConfig.CalendarInterval("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()) ); final List metrics = Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))); @@ -397,7 +413,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); final GroupConfig group = new GroupConfig( - new DateHistogramGroupConfig("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), + new DateHistogramGroupConfig.CalendarInterval("bar", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name null ); @@ -420,7 +436,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); final GroupConfig group = new GroupConfig( - new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID()), new HistogramGroupConfig(1L, "baz"), // <-- NOTE right type but wrong name null ); @@ -440,7 +456,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1d"), null, "UTC"), new HistogramGroupConfig(3L, "bar"), null); @@ -457,7 +473,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testMissingMetric() { int i = ESTestCase.randomIntBetween(0, 3); - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final List metrics = singletonList(new MetricConfig("foo", Arrays.asList("avg", "max", "min", "sum"))); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); Set caps = singletonSet(new RollupJobCaps(job)); @@ -487,37 +503,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { } public void testValidateFixedInterval() { - boolean valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("100ms")); - assertTrue(valid); - - valid = RollupJobIdentifierUtils.validateFixedInterval(200, new DateHistogramInterval("100ms")); - assertTrue(valid); - - valid = RollupJobIdentifierUtils.validateFixedInterval(1000, new DateHistogramInterval("200ms")); - assertTrue(valid); - - valid = RollupJobIdentifierUtils.validateFixedInterval(5*60*1000, new DateHistogramInterval("5m")); - assertTrue(valid); - - valid = RollupJobIdentifierUtils.validateFixedInterval(10*5*60*1000, new DateHistogramInterval("5m")); - assertTrue(valid); - - valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("500ms")); - assertFalse(valid); - - valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("5m")); - assertFalse(valid); - - valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("minute")); - assertFalse(valid); - - valid = RollupJobIdentifierUtils.validateFixedInterval(100, new DateHistogramInterval("second")); - assertFalse(valid); - - // ----------- - // Same tests, with both being DateHistoIntervals - // ----------- - valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), + boolean valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), new DateHistogramInterval("100ms")); assertTrue(valid); @@ -545,13 +531,11 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { new DateHistogramInterval("5m")); assertFalse(valid); - valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), - new DateHistogramInterval("minute")); - assertFalse(valid); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), new DateHistogramInterval("minute"))); + assertThat(e.getMessage(), equalTo("failed to parse setting [date_histo.config.interval] with value " + + "[minute] as a time value: unit is missing or unrecognized")); - valid = RollupJobIdentifierUtils.validateFixedInterval(new DateHistogramInterval("100ms"), - new DateHistogramInterval("second")); - assertFalse(valid); } public void testValidateCalendarInterval() { @@ -590,8 +574,16 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { List caps = new ArrayList<>(numCaps); for (int i = 0; i < numCaps; i++) { - DateHistogramInterval interval = getRandomInterval(); - GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", interval)); + DateHistogramInterval interval; + DateHistogramGroupConfig dateHistoConfig; + if (randomBoolean()) { + interval = getRandomCalendarInterval(); + dateHistoConfig = new DateHistogramGroupConfig.CalendarInterval("foo", interval); + } else { + interval = getRandomFixedInterval(); + dateHistoConfig = new DateHistogramGroupConfig.FixedInterval("foo", interval); + } + GroupConfig group = new GroupConfig(dateHistoConfig); RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); caps.add(cap); @@ -617,7 +609,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { for (int i = 0; i < numCaps; i++) { DateHistogramInterval interval = getRandomFixedInterval(); - GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", interval)); + GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.FixedInterval("foo", interval)); RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); caps.add(cap); @@ -643,7 +635,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { for (int i = 0; i < numCaps; i++) { DateHistogramInterval interval = getRandomCalendarInterval(); - GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", interval)); + GroupConfig group = new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", interval)); RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); caps.add(cap); @@ -665,21 +657,22 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { public void testObsoleteTimezone() { // Job has "obsolete" timezone - DateHistogramGroupConfig dateHisto = new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "Canada/Mountain"); + DateHistogramGroupConfig dateHisto = new DateHistogramGroupConfig.CalendarInterval("foo", + new DateHistogramInterval("1h"), null, "Canada/Mountain"); GroupConfig group = new GroupConfig(dateHisto); RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()) + .calendarInterval(job.getGroupConfig().getDateHistogram().getInterval()) .timeZone(ZoneId.of("Canada/Mountain")); Set bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); assertThat(bestCaps.size(), equalTo(1)); builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()) + .calendarInterval(job.getGroupConfig().getDateHistogram().getInterval()) .timeZone(ZoneId.of("America/Edmonton")); bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); @@ -687,21 +680,21 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { // now the reverse, job has "new" timezone - dateHisto = new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "America/Edmonton"); + dateHisto = new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"), null, "America/Edmonton"); group = new GroupConfig(dateHisto); job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); cap = new RollupJobCaps(job); caps = singletonSet(cap); builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()) + .calendarInterval(job.getGroupConfig().getDateHistogram().getInterval()) .timeZone(ZoneId.of("Canada/Mountain")); bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); assertThat(bestCaps.size(), equalTo(1)); builder = new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval()) + .calendarInterval(job.getGroupConfig().getDateHistogram().getInterval()) .timeZone(ZoneId.of("America/Edmonton")); bestCaps = RollupJobIdentifierUtils.findBestJobs(builder, caps); @@ -712,20 +705,13 @@ public class RollupJobIdentifierUtilTests extends ESTestCase { for (RollupJobCaps.RollupFieldCaps fieldCaps : cap.getFieldCaps().values()) { for (Map agg : fieldCaps.getAggs()) { if (agg.get(RollupField.AGG).equals(DateHistogramAggregationBuilder.NAME)) { - return RollupJobIdentifierUtils.getMillisFixedOrCalendar((String) agg.get(RollupField.INTERVAL)); + return new DateHistogramInterval(RollupJobIdentifierUtils.retrieveInterval(agg)).estimateMillis(); } } } return Long.MAX_VALUE; } - private static DateHistogramInterval getRandomInterval() { - if (randomBoolean()) { - return getRandomFixedInterval(); - } - return getRandomCalendarInterval(); - } - private static DateHistogramInterval getRandomFixedInterval() { int value = randomIntBetween(1, 1000); String unit; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java index db58115489d..27dcc751860 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java @@ -57,18 +57,18 @@ public class RollupRequestTranslationTests extends ESTestCase { public void testBasicDateHisto() { DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); - histo.dateHistogramInterval(new DateHistogramInterval("1d")) + histo.calendarInterval(new DateHistogramInterval("1d")) .field("foo") .extendedBounds(new ExtendedBounds(0L, 1000L)) .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); - + List translated = translateAggregation(histo, namedWriteableRegistry); assertThat(translated.size(), equalTo(1)); assertThat(translated.get(0), Matchers.instanceOf(DateHistogramAggregationBuilder.class)); DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); - assertThat(translatedHisto.dateHistogramInterval(), equalTo(new DateHistogramInterval("1d"))); + assertThat(translatedHisto.getCalendarInterval(), equalTo(new DateHistogramInterval("1d"))); assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); assertThat(translatedHisto.getSubAggregations().size(), equalTo(4)); @@ -93,7 +93,7 @@ public class RollupRequestTranslationTests extends ESTestCase { public void testFormattedDateHisto() { DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); - histo.dateHistogramInterval(new DateHistogramInterval("1d")) + histo.calendarInterval(new DateHistogramInterval("1d")) .field("foo") .extendedBounds(new ExtendedBounds(0L, 1000L)) .format("yyyy-MM-dd") @@ -104,7 +104,7 @@ public class RollupRequestTranslationTests extends ESTestCase { assertThat(translated.get(0), Matchers.instanceOf(DateHistogramAggregationBuilder.class)); DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); - assertThat(translatedHisto.dateHistogramInterval(), equalTo(new DateHistogramInterval("1d"))); + assertThat(translatedHisto.getCalendarInterval(), equalTo(new DateHistogramInterval("1d"))); assertThat(translatedHisto.format(), equalTo("yyyy-MM-dd")); assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); } @@ -150,7 +150,7 @@ public class RollupRequestTranslationTests extends ESTestCase { public void testDateHistoIntervalWithMinMax() { DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); - histo.dateHistogramInterval(new DateHistogramInterval("1d")) + histo.calendarInterval(new DateHistogramInterval("1d")) .field("foo") .subAggregation(new MaxAggregationBuilder("the_max").field("max_field")) .subAggregation(new AvgAggregationBuilder("the_avg").field("avg_field")); @@ -160,7 +160,7 @@ public class RollupRequestTranslationTests extends ESTestCase { assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class)); DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); - assertThat(translatedHisto.dateHistogramInterval().toString(), equalTo("1d")); + assertThat(translatedHisto.getCalendarInterval().toString(), equalTo("1d")); assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); assertThat(translatedHisto.getSubAggregations().size(), equalTo(4)); @@ -195,7 +195,8 @@ public class RollupRequestTranslationTests extends ESTestCase { assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class)); DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); - assertThat(translatedHisto.interval(), equalTo(86400000L)); + assertNull(translatedHisto.getCalendarInterval()); + assertThat(translatedHisto.getFixedInterval(), equalTo(new DateHistogramInterval("86400000ms"))); assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); assertThat(translatedHisto.getSubAggregations().size(), equalTo(4)); @@ -216,12 +217,15 @@ public class RollupRequestTranslationTests extends ESTestCase { assertThat(subAggs.get("test_histo._count"), instanceOf(SumAggregationBuilder.class)); assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(), equalTo("foo.date_histogram._count")); + + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] " + + "or [calendar_interval] in the future."); } public void testDateHistoWithTimezone() { ZoneId timeZone = ZoneId.of(randomFrom(ZoneId.getAvailableZoneIds())); DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); - histo.interval(86400000) + histo.fixedInterval(new DateHistogramInterval("86400000ms")) .field("foo") .timeZone(timeZone); @@ -230,11 +234,55 @@ public class RollupRequestTranslationTests extends ESTestCase { assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class)); DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); - assertThat(translatedHisto.interval(), equalTo(86400000L)); + assertThat(translatedHisto.getFixedInterval().toString(), equalTo("86400000ms")); assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); assertThat(translatedHisto.timeZone(), equalTo(timeZone)); } + public void testDeprecatedInterval() { + DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); + histo.interval(86400000).field("foo"); + + List translated = translateAggregation(histo, namedWriteableRegistry); + assertThat(translated.size(), equalTo(1)); + assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class)); + DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); + + assertThat(translatedHisto.getFixedInterval().toString(), equalTo("86400000ms")); + assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] " + + "or [calendar_interval] in the future."); + } + + public void testDeprecatedDateHistoInterval() { + DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("test_histo"); + histo.dateHistogramInterval(new DateHistogramInterval("1d")).field("foo"); + + List translated = translateAggregation(histo, namedWriteableRegistry); + assertThat(translated.size(), equalTo(1)); + assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class)); + DateHistogramAggregationBuilder translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); + + assertThat(translatedHisto.dateHistogramInterval().toString(), equalTo("1d")); + assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] " + + "or [calendar_interval] in the future."); + + + histo = new DateHistogramAggregationBuilder("test_histo"); + histo.dateHistogramInterval(new DateHistogramInterval("4d")).field("foo"); + + translated = translateAggregation(histo, namedWriteableRegistry); + assertThat(translated.size(), equalTo(1)); + assertThat(translated.get(0), instanceOf(DateHistogramAggregationBuilder.class)); + translatedHisto = (DateHistogramAggregationBuilder)translated.get(0); + + assertThat(translatedHisto.dateHistogramInterval().toString(), equalTo("4d")); + assertThat(translatedHisto.field(), equalTo("foo.date_histogram.timestamp")); + assertWarnings("[interval] on [date_histogram] is deprecated, use [fixed_interval] " + + "or [calendar_interval] in the future."); + } + public void testAvgMetric() { List translated = translateAggregation(new AvgAggregationBuilder("test_metric") .field("foo"), namedWriteableRegistry); @@ -320,7 +368,6 @@ public class RollupRequestTranslationTests extends ESTestCase { assertThat(subAggs.get("test_histo._count"), Matchers.instanceOf(SumAggregationBuilder.class)); assertThat(((SumAggregationBuilder)subAggs.get("test_histo._count")).field(), equalTo("foo.histogram._count")); - } public void testUnsupportedAgg() { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java index 6c20b90f5ae..25fe2f51b2f 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java @@ -51,6 +51,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.filter.FilterAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregationBuilder; @@ -473,7 +474,7 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { = new GeoBoundsAggregationBuilder("histo").field("bar"); DateHistogramAggregationBuilder histoBuilder = new DateHistogramAggregationBuilder("histo") - .field("bar").interval(100); + .field("bar").fixedInterval(new DateHistogramInterval("100ms")); FilterAggregationBuilder filterBuilder = new FilterAggregationBuilder("filter", new TermQueryBuilder("foo", "bar")); filterBuilder.subAggregation(histoBuilder); @@ -517,11 +518,11 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { public void testDateHisto() throws IOException { DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo") - .field("timestamp").interval(100); + .field("timestamp").fixedInterval(new DateHistogramInterval("100ms")); DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo") .field("timestamp.date_histogram." + RollupField.TIMESTAMP) - .interval(100) + .fixedInterval(new DateHistogramInterval("100ms")) .subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD) .field("timestamp.date_histogram." + RollupField.COUNT_FIELD)); @@ -561,12 +562,12 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { public void testDateHistoWithGap() throws IOException { DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo") - .field("timestamp").interval(100) + .field("timestamp").fixedInterval(new DateHistogramInterval("100ms")) .minDocCount(0); DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo") .field("timestamp.date_histogram." + RollupField.TIMESTAMP) - .interval(100) + .fixedInterval(new DateHistogramInterval("100ms")) .minDocCount(0) .subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD) .field("timestamp.date_histogram." + RollupField.COUNT_FIELD)); @@ -618,12 +619,12 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { public void testNonMatchingPartition() throws IOException { DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo") - .field("timestamp").interval(100) + .field("timestamp").fixedInterval(new DateHistogramInterval("100ms")) .minDocCount(0); DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo") .field("timestamp.date_histogram." + RollupField.TIMESTAMP) - .interval(100) + .fixedInterval(new DateHistogramInterval("100ms")) .minDocCount(0) .subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD) .field("timestamp.date_histogram." + RollupField.COUNT_FIELD)); @@ -731,11 +732,11 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { public void testDateHistoOverlappingAggTrees() throws IOException { DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo") - .field("timestamp").interval(100); + .field("timestamp").fixedInterval(new DateHistogramInterval("100ms")); DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo") .field("timestamp.date_histogram." + RollupField.TIMESTAMP) - .interval(100) + .fixedInterval(new DateHistogramInterval("100ms")) .subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD) .field("timestamp.date_histogram." + RollupField.COUNT_FIELD)); @@ -792,11 +793,11 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { public void testDateHistoOverlappingMergeRealIntoZero() throws IOException { DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo") - .field("timestamp").interval(100); + .field("timestamp").fixedInterval(new DateHistogramInterval("100ms")); DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo") .field("timestamp.date_histogram." + RollupField.TIMESTAMP) - .interval(100) + .fixedInterval(new DateHistogramInterval("100ms")) .subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD) .field("timestamp.date_histogram." + RollupField.COUNT_FIELD)); @@ -859,11 +860,11 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { public void testDateHistoOverlappingMergeZeroIntoReal() throws IOException { DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo") - .field("timestamp").interval(100).minDocCount(0); + .field("timestamp").fixedInterval(new DateHistogramInterval("100ms")).minDocCount(0); DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo") .field("timestamp.date_histogram." + RollupField.TIMESTAMP) - .interval(100) + .fixedInterval(new DateHistogramInterval("100ms")) .minDocCount(0) .subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD) .field("timestamp.date_histogram." + RollupField.COUNT_FIELD)); @@ -1218,11 +1219,11 @@ public class RollupResponseTranslationTests extends AggregatorTestCase { public void testOverlappingBuckets() throws IOException { DateHistogramAggregationBuilder nonRollupHisto = new DateHistogramAggregationBuilder("histo") - .field("timestamp").interval(100); + .field("timestamp").fixedInterval(new DateHistogramInterval("100ms")); DateHistogramAggregationBuilder rollupHisto = new DateHistogramAggregationBuilder("histo") .field("timestamp.date_histogram." + RollupField.TIMESTAMP) - .interval(100) + .fixedInterval(new DateHistogramInterval("100ms")) .subAggregation(new SumAggregationBuilder("histo." + RollupField.COUNT_FIELD) .field("timestamp.date_histogram." + RollupField.COUNT_FIELD)); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index 448e901997f..00f2fbd3171 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -100,7 +100,7 @@ public class SearchActionTests extends ESTestCase { SearchSourceBuilder source = new SearchSourceBuilder(); source.query(new MatchAllQueryBuilder()); source.size(100); - source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").interval(123)); + source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").fixedInterval(new DateHistogramInterval("123ms"))); SearchRequest request = new SearchRequest(normalIndices, source); NamedWriteableRegistry registry = mock(NamedWriteableRegistry.class); Exception e = expectThrows(IllegalArgumentException.class, @@ -111,7 +111,7 @@ public class SearchActionTests extends ESTestCase { public void testBadQuery() { SearchSourceBuilder source = new SearchSourceBuilder(); source.query(new MatchPhraseQueryBuilder("foo", "bar")); - source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").interval(123)); + source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").fixedInterval(new DateHistogramInterval("123ms"))); source.size(0); Exception e = expectThrows(IllegalArgumentException.class, () -> TransportRollupSearchAction.rewriteQuery(new MatchPhraseQueryBuilder("foo", "bar"), Collections.emptySet())); @@ -119,7 +119,8 @@ public class SearchActionTests extends ESTestCase { } public void testRangeTimezoneUTC() { - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); @@ -131,7 +132,8 @@ public class SearchActionTests extends ESTestCase { } public void testRangeNullTimeZone() { - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, null)); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"), null, null)); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); @@ -143,7 +145,8 @@ public class SearchActionTests extends ESTestCase { } public void testRangeDifferentTZ() { - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "UTC")); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"), null, "UTC")); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); @@ -155,7 +158,8 @@ public class SearchActionTests extends ESTestCase { public void testTermQuery() { final TermsGroupConfig terms = new TermsGroupConfig("foo"); - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("boo", new DateHistogramInterval("1h")), null, terms); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("boo", new DateHistogramInterval("1h")), null, terms); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); @@ -167,7 +171,8 @@ public class SearchActionTests extends ESTestCase { public void testTermsQuery() { final TermsGroupConfig terms = new TermsGroupConfig("foo"); - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("boo", new DateHistogramInterval("1h")), null, terms); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("boo", new DateHistogramInterval("1h")), null, terms); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); @@ -181,7 +186,8 @@ public class SearchActionTests extends ESTestCase { } public void testCompounds() { - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); @@ -195,7 +201,8 @@ public class SearchActionTests extends ESTestCase { } public void testMatchAll() { - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); @@ -206,7 +213,8 @@ public class SearchActionTests extends ESTestCase { public void testAmbiguousResolution() { final TermsGroupConfig terms = new TermsGroupConfig("foo"); - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, terms); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h")), null, terms); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = new HashSet<>(); @@ -254,7 +262,7 @@ public class SearchActionTests extends ESTestCase { TransportRollupSearchAction.RollupSearchContext ctx = new TransportRollupSearchAction.RollupSearchContext(normalIndices, rollupIndices, Collections.emptySet()); SearchSourceBuilder source = new SearchSourceBuilder(); - source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").interval(123)); + source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").fixedInterval(new DateHistogramInterval("123ms"))); source.postFilter(new TermQueryBuilder("foo", "bar")); source.size(0); SearchRequest request = new SearchRequest(normalIndices, source); @@ -355,7 +363,8 @@ public class SearchActionTests extends ESTestCase { } public void testGood() { - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"))); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h"))); final RollupJobConfig config = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(config); Set caps = singleton(cap); @@ -371,7 +380,7 @@ public class SearchActionTests extends ESTestCase { source.query(getQueryBuilder(1)); source.size(0); source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(config.getGroupConfig().getDateHistogram().getInterval())); + .calendarInterval(config.getGroupConfig().getDateHistogram().getInterval())); SearchRequest request = new SearchRequest(combinedIndices, source); MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx); @@ -396,11 +405,11 @@ public class SearchActionTests extends ESTestCase { SearchSourceBuilder source = new SearchSourceBuilder(); source.query(null); source.size(0); - source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").dateHistogramInterval(new DateHistogramInterval("1d"))); + source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo").calendarInterval(new DateHistogramInterval("1d"))); SearchRequest request = new SearchRequest(combinedIndices, source); - final GroupConfig groupConfig = - new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1d"), null, DateTimeZone.UTC.getID())); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); Set caps = singleton(new RollupJobCaps(job)); @@ -422,7 +431,8 @@ public class SearchActionTests extends ESTestCase { } public void testTwoMatchingJobs() { - final GroupConfig groupConfig = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, null); + final GroupConfig groupConfig = new GroupConfig( + new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h")), null, null); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); @@ -447,7 +457,7 @@ public class SearchActionTests extends ESTestCase { source.query(getQueryBuilder(1)); source.size(0); source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())); + .calendarInterval(job.getGroupConfig().getDateHistogram().getInterval())); SearchRequest request = new SearchRequest(combinedIndices, source); MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx); @@ -468,7 +478,7 @@ public class SearchActionTests extends ESTestCase { public void testTwoMatchingJobsOneBetter() { final GroupConfig groupConfig = - new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h")), null, null); + new GroupConfig(new DateHistogramGroupConfig.CalendarInterval("foo", new DateHistogramInterval("1h")), null, null); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, groupConfig, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); @@ -494,7 +504,7 @@ public class SearchActionTests extends ESTestCase { source.query(getQueryBuilder(1)); source.size(0); source.aggregation(new DateHistogramAggregationBuilder("foo").field("foo") - .dateHistogramInterval(job.getGroupConfig().getDateHistogram().getInterval())); + .calendarInterval(job.getGroupConfig().getDateHistogram().getInterval())); SearchRequest request = new SearchRequest(combinedIndices, source); MultiSearchRequest msearch = TransportRollupSearchAction.createMSearchRequest(request, namedWriteableRegistry, ctx); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java index 2f0612a65d2..32f05bed4e7 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.rollup.config; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig.CalendarInterval; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; import org.elasticsearch.xpack.core.rollup.job.HistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; @@ -57,36 +58,36 @@ public class ConfigTests extends ESTestCase { public void testEmptyDateHistoField() { Exception e = expectThrows(IllegalArgumentException.class, - () -> new DateHistogramGroupConfig(null, DateHistogramInterval.HOUR)); + () -> new CalendarInterval(null, DateHistogramInterval.HOUR)); assertThat(e.getMessage(), equalTo("Field must be a non-null, non-empty string")); - e = expectThrows(IllegalArgumentException.class, () -> new DateHistogramGroupConfig("", DateHistogramInterval.HOUR)); + e = expectThrows(IllegalArgumentException.class, () -> new CalendarInterval("", DateHistogramInterval.HOUR)); assertThat(e.getMessage(), equalTo("Field must be a non-null, non-empty string")); } public void testEmptyDateHistoInterval() { - Exception e = expectThrows(IllegalArgumentException.class, () -> new DateHistogramGroupConfig("foo", null)); + Exception e = expectThrows(IllegalArgumentException.class, () -> new CalendarInterval("foo", null)); assertThat(e.getMessage(), equalTo("Interval must be non-null")); } public void testNullTimeZone() { - DateHistogramGroupConfig config = new DateHistogramGroupConfig("foo", DateHistogramInterval.HOUR, null, null); + DateHistogramGroupConfig config = new CalendarInterval("foo", DateHistogramInterval.HOUR, null, null); assertThat(config.getTimeZone(), equalTo(DateTimeZone.UTC.getID())); } public void testEmptyTimeZone() { - DateHistogramGroupConfig config = new DateHistogramGroupConfig("foo", DateHistogramInterval.HOUR, null, ""); + DateHistogramGroupConfig config = new CalendarInterval("foo", DateHistogramInterval.HOUR, null, ""); assertThat(config.getTimeZone(), equalTo(DateTimeZone.UTC.getID())); } public void testDefaultTimeZone() { - DateHistogramGroupConfig config = new DateHistogramGroupConfig("foo", DateHistogramInterval.HOUR); + DateHistogramGroupConfig config = new CalendarInterval("foo", DateHistogramInterval.HOUR); assertThat(config.getTimeZone(), equalTo(DateTimeZone.UTC.getID())); } - public void testUnknownTimeZone() { + public void testUnkownTimeZone() { Exception e = expectThrows(ZoneRulesException.class, - () -> new DateHistogramGroupConfig("foo", DateHistogramInterval.HOUR, null, "FOO")); + () -> new CalendarInterval("foo", DateHistogramInterval.HOUR, null, "FOO")); assertThat(e.getMessage(), equalTo("Unknown time-zone ID: FOO")); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java index 38b90328a87..080482735e3 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java @@ -101,8 +101,8 @@ public class IndexerUtilsTests extends AggregatorTestCase { valueFieldType.setName(valueField); // Setup the composite agg - //TODO swap this over to DateHistoConfig.Builder once DateInterval is in - DateHistogramGroupConfig dateHistoGroupConfig = new DateHistogramGroupConfig(timestampField, DateHistogramInterval.DAY); + DateHistogramGroupConfig dateHistoGroupConfig + = new DateHistogramGroupConfig.CalendarInterval(timestampField, DateHistogramInterval.DAY); CompositeAggregationBuilder compositeBuilder = new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, RollupIndexer.createValueSourceBuilders(dateHistoGroupConfig)); @@ -169,7 +169,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { DateHistogramValuesSourceBuilder dateHisto = new DateHistogramValuesSourceBuilder("the_histo." + DateHistogramAggregationBuilder.NAME) .field(timestampField) - .interval(1); + .fixedInterval(new DateHistogramInterval("1ms")); CompositeAggregationBuilder compositeBuilder = new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, singletonList(dateHisto)); @@ -292,7 +292,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { DateHistogramValuesSourceBuilder dateHisto = new DateHistogramValuesSourceBuilder("the_histo." + DateHistogramAggregationBuilder.NAME) .field(timestampField) - .dateHistogramInterval(new DateHistogramInterval("1d")); + .calendarInterval(new DateHistogramInterval("1d")); CompositeAggregationBuilder compositeBuilder = new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, singletonList(dateHisto)); @@ -607,7 +607,7 @@ public class IndexerUtilsTests extends AggregatorTestCase { DateHistogramValuesSourceBuilder dateHisto = new DateHistogramValuesSourceBuilder("the_histo." + DateHistogramAggregationBuilder.NAME) .field(timestampField) - .dateHistogramInterval(new DateHistogramInterval("1d")) + .calendarInterval(new DateHistogramInterval("1d")) .timeZone(ZoneId.of("-01:00", ZoneId.SHORT_IDS)); // adds a timezone so that we aren't on default UTC CompositeAggregationBuilder compositeBuilder = new CompositeAggregationBuilder(RollupIndexer.AGGREGATION_NAME, diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 18365c2b485..b0b6dc83337 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -49,6 +49,8 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInter import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig.CalendarInterval; +import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig.FixedInterval; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; import org.elasticsearch.xpack.core.rollup.job.MetricConfig; import org.elasticsearch.xpack.core.rollup.job.RollupJob; @@ -96,7 +98,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { public void testSimpleDateHisto() throws Exception { String rollupIndex = randomAlphaOfLength(10); String field = "the_histo"; - DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1ms")); + DateHistogramGroupConfig dateHistoConfig = new FixedInterval(field, new DateHistogramInterval("1ms")); RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); final List> dataset = new ArrayList<>(); dataset.addAll( @@ -140,7 +142,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { public void testDateHistoAndMetrics() throws Exception { String rollupIndex = randomAlphaOfLength(10); String field = "the_histo"; - DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1h")); + DateHistogramGroupConfig dateHistoConfig = new CalendarInterval(field, new DateHistogramInterval("1h")); MetricConfig config = new MetricConfig("counter", Arrays.asList("avg", "sum", "max", "min")); RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.singletonList(config)); final List> dataset = new ArrayList<>(); @@ -263,7 +265,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { String rollupIndex = randomAlphaOfLengthBetween(5, 10); String field = "the_histo"; DateHistogramGroupConfig dateHistoConfig = - new DateHistogramGroupConfig(field, new DateHistogramInterval("1m"), new DateHistogramInterval("1h"), null); + new FixedInterval(field, new DateHistogramInterval("1m"), new DateHistogramInterval("1h"), null); RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); final List> dataset = new ArrayList<>(); long now = System.currentTimeMillis(); @@ -344,7 +346,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { String timeZone = DateTimeZone.forOffsetHours(-3).getID(); String rollupIndex = randomAlphaOfLengthBetween(5, 10); String field = "the_histo"; - DateHistogramGroupConfig dateHistoConfig = new DateHistogramGroupConfig(field, new DateHistogramInterval("1d"), null, timeZone); + DateHistogramGroupConfig dateHistoConfig = new CalendarInterval(field, new DateHistogramInterval("1d"), null, timeZone); RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); executeTestCase(dataset, job, now, (resp) -> { @@ -404,9 +406,9 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { String timestampField = "ts"; String valueField = "the_avg"; - String timeInterval = randomIntBetween(1, 10) + randomFrom("h", "m"); + String timeInterval = randomIntBetween(2, 10) + randomFrom("h", "m"); DateHistogramGroupConfig dateHistoConfig = - new DateHistogramGroupConfig(timestampField, new DateHistogramInterval(timeInterval)); + new FixedInterval(timestampField, new DateHistogramInterval(timeInterval)); MetricConfig metricConfig = new MetricConfig(valueField, Collections.singletonList("avg")); RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.singletonList(metricConfig)); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 690383ce96e..1d4a190b24e 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -67,7 +67,12 @@ setup: --- "Test preview transform": + - skip: + reason: date histo interval is deprecated + features: "warnings" - do: + warnings: + - "[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future." data_frame.preview_data_frame_transform: body: > { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml index 5dda4f3def6..d8ee4926e97 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ml/datafeeds_crud.yml @@ -319,7 +319,7 @@ setup: "histogram_buckets":{ "date_histogram": { "field": "@timestamp", - "interval": "5m", + "fixed_interval": "5m", "time_zone": "UTC", "min_doc_count": 0 }, diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml index 1710e51c32b..2b8f44be286 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/delete_job.yml @@ -26,7 +26,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -54,7 +54,7 @@ setup: page_size: 10 groups : date_histogram: - interval: "1h" + calendar_interval: "1h" field: "the_field" time_zone: "UTC" metrics: @@ -107,7 +107,7 @@ setup: page_size: 10 groups : date_histogram: - interval: "1h" + calendar_interval: "1h" field: "the_field" time_zone: "UTC" metrics: @@ -160,7 +160,7 @@ setup: page_size: 10 groups : date_histogram: - interval: "1h" + calendar_interval: "1h" field: "the_field" time_zone: "UTC" metrics: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml index cd00a6f717b..c7e9da5aedd 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_jobs.yml @@ -29,7 +29,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -55,7 +55,7 @@ setup: page_size: 10 groups : date_histogram: - interval: "1h" + calendar_interval: "1h" field: "the_field" time_zone: "UTC" metrics: @@ -119,7 +119,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -145,7 +145,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -172,7 +172,7 @@ setup: page_size: 10 groups : date_histogram: - interval: "1h" + calendar_interval: "1h" field: "the_field" time_zone: "UTC" metrics: @@ -198,7 +198,7 @@ setup: page_size: 10 groups : date_histogram: - interval: "1h" + calendar_interval: "1h" field: "the_field" time_zone: "UTC" metrics: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml index 3d38f4a3712..42acd41097b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_caps.yml @@ -47,7 +47,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -74,7 +74,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -98,7 +98,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -121,7 +121,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -133,7 +133,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -157,7 +157,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -182,7 +182,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -206,7 +206,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -218,7 +218,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -233,7 +233,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml index e4b98b94920..cbed3770ef2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/get_rollup_index_caps.yml @@ -47,7 +47,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -74,7 +74,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -98,7 +98,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -121,7 +121,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -133,7 +133,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -158,7 +158,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -181,7 +181,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -205,7 +205,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -229,7 +229,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -254,7 +254,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -266,7 +266,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -280,7 +280,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -308,7 +308,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -332,7 +332,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -357,7 +357,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -369,7 +369,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -383,7 +383,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" @@ -407,7 +407,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -431,7 +431,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -456,7 +456,7 @@ setup: fields: the_field: - agg: "date_histogram" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" value_field: - agg: "min" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml index 7983778108b..7226dcb7e13 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/put_job.yml @@ -29,7 +29,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -55,7 +55,7 @@ setup: page_size: 10 groups : date_histogram: - interval: "1h" + calendar_interval: "1h" field: "the_field" time_zone: "UTC" metrics: @@ -97,7 +97,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -124,7 +124,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -155,7 +155,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -188,7 +188,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -217,7 +217,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ @@ -246,7 +246,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml index e93a0deb037..ca04327eab7 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/rollup_search.yml @@ -28,7 +28,7 @@ setup: "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h" + "calendar_interval": "1h" }, "terms": { "fields": ["partition"] @@ -135,7 +135,34 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" + time_zone: "UTC" + + - length: { aggregations.histo.buckets: 4 } + - match: { aggregations.histo.buckets.0.key_as_string: "2017-01-01T05:00:00.000Z" } + - match: { aggregations.histo.buckets.0.doc_count: 1 } + - match: { aggregations.histo.buckets.1.key_as_string: "2017-01-01T06:00:00.000Z" } + - match: { aggregations.histo.buckets.1.doc_count: 2 } + - match: { aggregations.histo.buckets.2.key_as_string: "2017-01-01T07:00:00.000Z" } + - match: { aggregations.histo.buckets.2.doc_count: 10 } + - match: { aggregations.histo.buckets.3.key_as_string: "2017-01-01T08:00:00.000Z" } + - match: { aggregations.histo.buckets.3.doc_count: 20 } + +--- +"Basic Search with rest_total_hits_as_int": + - skip: + version: " - 6.5.99" + reason: rest_total_hits_as_int was introduced in 6.6.0 + - do: + rollup.rollup_search: + index: "foo_rollup" + body: + size: 0 + aggs: + histo: + date_histogram: + field: "timestamp" + calendar_interval: "1h" time_zone: "UTC" - length: { aggregations.histo.buckets: 4 } @@ -160,7 +187,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" format: "yyyy-MM-dd" @@ -218,7 +245,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" aggs: the_max: @@ -254,7 +281,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" aggs: the_max: @@ -291,7 +318,7 @@ setup: "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h" + "calendar_interval": "1h" }, "terms": { "fields": ["partition"] @@ -396,7 +423,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" aggs: the_max: @@ -434,7 +461,7 @@ setup: "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h" + "calendar_interval": "1h" }, "terms": { "fields": ["partition"] @@ -542,7 +569,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" aggs: the_max: @@ -579,7 +606,7 @@ setup: "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1d" + "calendar_interval": "1d" }, "terms": { "fields": ["partition"] @@ -686,7 +713,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" aggs: the_max: @@ -718,7 +745,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" - length: { aggregations.histo.buckets: 4 } @@ -761,7 +788,7 @@ setup: "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h" + "calendar_interval": "1h" }, "terms": { "fields": ["partition"] @@ -785,7 +812,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" @@ -806,7 +833,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" - length: { aggregations.histo.buckets: 4 } @@ -849,7 +876,7 @@ setup: "groups" : { "date_histogram": { "field": "timestamp", - "interval": "1h" + "calendar_interval": "1h" }, "terms": { "fields": ["partition"] @@ -878,7 +905,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" --- @@ -984,7 +1011,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "5m" + fixed_interval: "5m" time_zone: "America/Edmonton" aggs: the_max: @@ -1011,7 +1038,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "5m" + fixed_interval: "5m" time_zone: "Canada/Mountain" aggs: the_max: @@ -1137,7 +1164,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "5m" + fixed_interval: "5m" time_zone: "America/Edmonton" aggs: the_max: @@ -1165,7 +1192,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "5m" + fixed_interval: "5m" time_zone: "Canada/Mountain" aggs: the_max: @@ -1197,7 +1224,7 @@ setup: histo: date_histogram: field: "timestamp" - interval: "1h" + calendar_interval: "1h" time_zone: "UTC" aggs: the_max: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml index fbf9e851905..371f7c7207f 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/start_job.yml @@ -26,7 +26,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml index 7e8b6b3f61a..e7b81831c65 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/rollup/stop_job.yml @@ -26,7 +26,7 @@ setup: "groups" : { "date_histogram": { "field": "the_field", - "interval": "1h" + "calendar_interval": "1h" } }, "metrics": [ diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index 322e97db765..d7355269a11 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -247,6 +247,13 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { // create the rollup job final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-job-test"); + String intervalType; + if (getOldClusterVersion().onOrAfter(Version.V_7_2_0)) { + intervalType = "fixed_interval"; + } else { + intervalType = "interval"; + } + createRollupJobRequest.setJsonEntity("{" + "\"index_pattern\":\"rollup-*\"," + "\"rollup_index\":\"results-rollup\"," @@ -255,7 +262,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { + "\"groups\":{" + " \"date_histogram\":{" + " \"field\":\"timestamp\"," - + " \"interval\":\"5m\"" + + " \"" + intervalType + "\":\"5m\"" + " }" + "}," + "\"metrics\":[" diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java index a5579ad0aa5..604c9a17a5e 100644 --- a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java +++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java @@ -114,7 +114,7 @@ public class RollupIT extends ESRestTestCase { + "\"groups\":{" + " \"date_histogram\":{" + " \"field\":\"timestamp\"," - + " \"interval\":\"5m\"" + + " \"fixed_interval\":\"5m\"" + " }" + "}," + "\"metrics\":[" @@ -158,7 +158,7 @@ public class RollupIT extends ESRestTestCase { " \"date_histo\": {\n" + " \"date_histogram\": {\n" + " \"field\": \"timestamp\",\n" + - " \"interval\": \"60m\",\n" + + " \"fixed_interval\": \"60m\",\n" + " \"format\": \"date_time\"\n" + " },\n" + " \"aggs\": {\n" + diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 8b5d1f1ff72..58dac6b8f25 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -90,6 +90,7 @@ task copyTestNodeKeyMaterial(type: Copy) { for (Version version : bwcVersions.wireCompatible) { String baseName = "v${version}" + Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { mustRunAfter(precommit) @@ -165,6 +166,7 @@ for (Version version : bwcVersions.wireCompatible) { Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") oldClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'old_cluster' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') } Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> @@ -221,6 +223,7 @@ for (Version version : bwcVersions.wireCompatible) { oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'true' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') // We only need to run these tests once so we may as well do it when we're two thirds upgraded systemProperty 'tests.rest.blacklist', [ 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', @@ -241,6 +244,7 @@ for (Version version : bwcVersions.wireCompatible) { twoThirdsUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' systemProperty 'tests.first_round', 'false' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } @@ -253,6 +257,7 @@ for (Version version : bwcVersions.wireCompatible) { Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' + systemProperty 'tests.upgrade_from_version', version.toString().replace('-SNAPSHOT', '') /* * Force stopping all the upgraded nodes after the test runner * so they are alive during the test. diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java new file mode 100644 index 00000000000..7c594b7eef5 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RollupDateHistoUpgradeIT.java @@ -0,0 +1,302 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.xcontent.ObjectPath; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.hamcrest.Matcher; + +import java.io.IOException; +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; + + +public class RollupDateHistoUpgradeIT extends AbstractUpgradeTestCase { + private static final Version UPGRADE_FROM_VERSION = + Version.fromString(System.getProperty("tests.upgrade_from_version")); + + public void testDateHistoIntervalUpgrade() throws Exception { + switch (CLUSTER_TYPE) { + case OLD: + break; + case MIXED: + Request waitForYellow = new Request("GET", "/_cluster/health"); + waitForYellow.addParameter("wait_for_nodes", "3"); + waitForYellow.addParameter("wait_for_status", "yellow"); + client().performRequest(waitForYellow); + break; + case UPGRADED: + Request waitForGreen = new Request("GET", "/_cluster/health/target,rollup"); + waitForGreen.addParameter("wait_for_nodes", "3"); + waitForGreen.addParameter("wait_for_status", "green"); + // wait for long enough that we give delayed unassigned shards to stop being delayed + waitForGreen.addParameter("timeout", "70s"); + waitForGreen.addParameter("level", "shards"); + client().performRequest(waitForGreen); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC); + + if (CLUSTER_TYPE == ClusterType.OLD) { + String rollupEndpoint = UPGRADE_FROM_VERSION.before(Version.V_7_0_0) ? "_xpack/rollup" : "_rollup"; + + String settings = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\", \"number_of_shards\": 1}}"; + + Request createTargetIndex = new Request("PUT", "/target"); + createTargetIndex.setJsonEntity(settings); + client().performRequest(createTargetIndex); + + final Request indexRequest = new Request("POST", "/target/_doc/1"); + indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.toString() + "\",\"value\":123}"); + client().performRequest(indexRequest); + client().performRequest(new Request("POST", "target/_refresh")); + + // create the rollup job with an old interval style + final Request createRollupJobRequest = new Request("PUT", rollupEndpoint + "/job/rollup-id-test"); + createRollupJobRequest.setJsonEntity("{" + + "\"index_pattern\":\"target\"," + + "\"rollup_index\":\"rollup\"," + + "\"cron\":\"*/1 * * * * ?\"," + + "\"page_size\":100," + + "\"groups\":{" + + " \"date_histogram\":{" + + " \"field\":\"timestamp\"," + + " \"interval\":\"5m\"" + + " }," + + "\"histogram\":{" + + " \"fields\": [\"value\"]," + + " \"interval\":1" + + " }," + + "\"terms\":{" + + " \"fields\": [\"value\"]" + + " }" + + "}," + + "\"metrics\":[" + + " {\"field\":\"value\",\"metrics\":[\"min\",\"max\",\"sum\"]}" + + "]" + + "}"); + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.setWarningsHandler(warnings -> { + warnings.remove("the default number of shards will change from [5] to [1] in 7.0.0; if you wish to continue using " + + "the default of [5] shards, you must manage this on the create index request or with an index template"); + return warnings.size() > 0; + }); + createRollupJobRequest.setOptions(options); + + Map createRollupJobResponse = entityAsMap(client().performRequest(createRollupJobRequest)); + assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + + String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}"; + Request updateSettings = new Request("PUT", "/rollup/_settings"); + updateSettings.setJsonEntity(recoverQuickly); + client().performRequest(updateSettings); + + // start the rollup job + final Request startRollupJobRequest = new Request("POST", rollupEndpoint + "/job/rollup-id-test/_start"); + Map startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest)); + assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); + + assertRollUpJob("rollup-id-test", rollupEndpoint); + List ids = getSearchResults(1); + assertThat(ids.toString(), ids, containsInAnyOrder("rollup-id-test$AuaduUZW8tgWmFP87DgzSA")); + } + + if (CLUSTER_TYPE == ClusterType.MIXED && Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + final Request indexRequest = new Request("POST", "/target/_doc/2"); + indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(1).toString() + "\",\"value\":345}"); + client().performRequest(indexRequest); + client().performRequest(new Request("POST", "target/_refresh")); + + assertRollUpJob("rollup-id-test", "_xpack/rollup"); + + List ids = getSearchResults(2); + assertThat(ids.toString(), ids, containsInAnyOrder("rollup-id-test$AuaduUZW8tgWmFP87DgzSA", + "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA")); + } + + if (CLUSTER_TYPE == ClusterType.MIXED && Booleans.parseBoolean(System.getProperty("tests.first_round")) == false) { + final Request indexRequest = new Request("POST", "/target/_doc/3"); + indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(2).toString() + "\",\"value\":456}"); + client().performRequest(indexRequest); + + client().performRequest(new Request("POST", "target/_refresh")); + + assertRollUpJob("rollup-id-test", "_xpack/rollup"); + client().performRequest(new Request("POST", "rollup/_refresh")); + + List ids = getSearchResults(3); + assertThat(ids.toString(), ids, containsInAnyOrder("rollup-id-test$AuaduUZW8tgWmFP87DgzSA", + "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA", "rollup-id-test$60RGDSb92YI5LH4_Fnq_1g")); + + } + + if (CLUSTER_TYPE == ClusterType.UPGRADED) { + final Request indexRequest = new Request("POST", "/target/_doc/4"); + indexRequest.setJsonEntity("{\"timestamp\":\"" + timestamp.plusDays(3).toString() + "\",\"value\":567}"); + client().performRequest(indexRequest); + client().performRequest(new Request("POST", "target/_refresh")); + + assertRollUpJob("rollup-id-test", "_rollup"); + + List ids = getSearchResults(4); + assertThat(ids.toString(), ids, containsInAnyOrder("rollup-id-test$AuaduUZW8tgWmFP87DgzSA", + "rollup-id-test$ehY4NAyVSy8xxUDZrNXXIA", "rollup-id-test$60RGDSb92YI5LH4_Fnq_1g", "rollup-id-test$LAKZftDeQwsUtdPixrkkzQ")); + } + + } + + private List getSearchResults(int expectedCount) throws Exception { + final List collectedIDs = new ArrayList<>(); + + assertBusy(() -> { + collectedIDs.clear(); + client().performRequest(new Request("POST", "rollup/_refresh")); + final Request searchRequest = new Request("GET", "rollup/_search"); + try { + Map searchResponse = entityAsMap(client().performRequest(searchRequest)); + logger.error(searchResponse); + + Object hits = ObjectPath.eval("hits.total", searchResponse); + assertNotNull(hits); + if (hits instanceof Number) { + assertThat(ObjectPath.eval("hits.total", searchResponse), equalTo(expectedCount)); + } else { + assertThat(ObjectPath.eval("hits.total.value", searchResponse), equalTo(expectedCount)); + } + + for (int i = 0; i < expectedCount; i++) { + String id = ObjectPath.eval("hits.hits." + i + "._id", searchResponse); + collectedIDs.add(id); + Map doc = ObjectPath.eval("hits.hits." + i + "._source", searchResponse); + assertNotNull(doc); + } + } catch (IOException e) { + fail(); + } + }); + return collectedIDs; + } + + @SuppressWarnings("unchecked") + private void assertRollUpJob(final String rollupJob, String endpoint) throws Exception { + final Matcher expectedStates = anyOf(equalTo("indexing"), equalTo("started")); + waitForRollUpJob(rollupJob, expectedStates, endpoint); + + // check that the rollup job is started using the RollUp API + final Request getRollupJobRequest = new Request("GET", endpoint + "/job/" + rollupJob); + // Hard to know which node we are talking to, so just remove this deprecation warning if we're hitting + // the old endpoint + if (endpoint.equals("_xpack/rollup")) { + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.setWarningsHandler(warnings -> { + warnings.remove("[GET /_xpack/rollup/job/{id}/] is deprecated! Use [GET /_rollup/job/{id}] instead."); + return warnings.size() > 0; + }); + getRollupJobRequest.setOptions(options); + } + + Map getRollupJobResponse = entityAsMap(client().performRequest(getRollupJobRequest)); + Map job = getJob(getRollupJobResponse, rollupJob); + if (job != null) { + assertThat(ObjectPath.eval("status.job_state", job), expectedStates); + } + + // check that the rollup job is started using the Tasks API + final Request taskRequest = new Request("GET", "_tasks"); + taskRequest.addParameter("detailed", "true"); + taskRequest.addParameter("actions", "xpack/rollup/*"); + Map taskResponse = entityAsMap(client().performRequest(taskRequest)); + Map taskResponseNodes = (Map) taskResponse.get("nodes"); + Map taskResponseNode = (Map) taskResponseNodes.values().iterator().next(); + Map taskResponseTasks = (Map) taskResponseNode.get("tasks"); + Map taskResponseStatus = (Map) taskResponseTasks.values().iterator().next(); + assertThat(ObjectPath.eval("status.job_state", taskResponseStatus), expectedStates); + + // check that the rollup job is started using the Cluster State API + final Request clusterStateRequest = new Request("GET", "_cluster/state/metadata"); + Map clusterStateResponse = entityAsMap(client().performRequest(clusterStateRequest)); + List> rollupJobTasks = ObjectPath.eval("metadata.persistent_tasks.tasks", clusterStateResponse); + + boolean hasRollupTask = false; + for (Map task : rollupJobTasks) { + if (ObjectPath.eval("id", task).equals(rollupJob)) { + hasRollupTask = true; + break; + } + } + if (hasRollupTask == false) { + fail("Expected persistent task for [" + rollupJob + "] but none found."); + } + + } + + private void waitForRollUpJob(final String rollupJob, final Matcher expectedStates, String endpoint) throws Exception { + assertBusy(() -> { + final Request getRollupJobRequest = new Request("GET", endpoint + "/job/" + rollupJob); + + // Hard to know which node we are talking to, so just remove this deprecation warning if we're hitting + // the old endpoint + if (endpoint.equals("_xpack/rollup")) { + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.setWarningsHandler(warnings -> { + logger.error(warnings); + warnings.remove("[GET /_xpack/rollup/job/{id}/] is deprecated! Use [GET /_rollup/job/{id}] instead."); + return warnings.size() > 0; + }); + getRollupJobRequest.setOptions(options); + } + Response getRollupJobResponse = client().performRequest(getRollupJobRequest); + assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + Map job = getJob(getRollupJobResponse, rollupJob); + if (job != null) { + assertThat(ObjectPath.eval("status.job_state", job), expectedStates); + } + }, 30L, TimeUnit.SECONDS); + } + + private static Map getJob(Response response, String targetJobId) throws IOException { + return getJob(ESRestTestCase.entityAsMap(response), targetJobId); + } + + @SuppressWarnings("unchecked") + private static Map getJob(Map jobsMap, String targetJobId) throws IOException { + + List> jobs = + (List>) XContentMapValues.extractValue("jobs", jobsMap); + + if (jobs == null) { + return null; + } + + for (Map job : jobs) { + String jobId = (String) ((Map) job.get("config")).get("id"); + if (jobId.equals(targetJobId)) { + return job; + } + } + return null; + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 18e9f66603a..e9790e69b3c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -17,7 +17,11 @@ --- "Test old cluster datafeed with aggs": + - skip: + features: "warnings" - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' ml.get_datafeeds: datafeed_id: old-cluster-datafeed-with-aggs - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed-with-aggs"} @@ -81,6 +85,8 @@ --- "Put job and datafeed with aggs in mixed cluster": + - skip: + features: "warnings" - do: ml.put_job: @@ -103,6 +109,8 @@ } - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' ml.put_datafeed: datafeed_id: mixed-cluster-datafeed-with-aggs body: > diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index 597540d36c4..bce9c25c08c 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -46,7 +46,10 @@ - is_false: datafeeds.0.node --- -"Put job and datafeed with aggs in old cluster": +"Put job and datafeed with aggs in old cluster - pre-deprecated interval": + - skip: + version: "8.0.0 - " #TODO change this after backport + reason: calendar_interval introduced in 7.1.0 - do: ml.put_job: @@ -111,3 +114,76 @@ datafeed_id: old-cluster-datafeed-with-aggs - match: { datafeeds.0.state: stopped} - is_false: datafeeds.0.node + +--- +"Put job and datafeed with aggs in old cluster - deprecated interval with warning": + - skip: + version: " - 7.99.99" #TODO change this after backport + reason: calendar_interval introduced in 7.1.0 + features: warnings + + - do: + ml.put_job: + job_id: old-cluster-datafeed-job-with-aggs + body: > + { + "description":"Cluster upgrade", + "analysis_config" : { + "bucket_span": "60s", + "summary_count_field_name": "doc_count", + "detectors" :[{"function":"count"}] + }, + "analysis_limits" : { + "model_memory_limit": "50mb" + }, + "data_description" : { + "format":"xcontent", + "time_field":"time" + } + } + - match: { job_id: old-cluster-datafeed-job-with-aggs } + + - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' + ml.put_datafeed: + datafeed_id: old-cluster-datafeed-with-aggs + body: > + { + "job_id":"old-cluster-datafeed-job-with-aggs", + "indices":["airline-data"], + "scroll_size": 2000, + "aggregations": { + "buckets": { + "date_histogram": { + "field": "time", + "interval": "30s", + "time_zone": "UTC" + }, + "aggregations": { + "time": { + "max": {"field": "time"} + }, + "airline": { + "terms": { + "field": "airline", + "size": 100 + }, + "aggregations": { + "responsetime": { + "avg": { + "field": "responsetime" + } + } + } + } + } + } + } + } + + - do: + ml.get_datafeed_stats: + datafeed_id: old-cluster-datafeed-with-aggs + - match: { datafeeds.0.state: stopped} + - is_false: datafeeds.0.node diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index f2dbb2e80dc..089c689d900 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -105,6 +105,8 @@ setup: --- "Test old and mixed cluster datafeeds with aggs": + - skip: + features: "warnings" - do: indices.create: index: airline-data @@ -115,6 +117,8 @@ setup: type: date - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' ml.get_datafeeds: datafeed_id: old-cluster-datafeed-with-aggs - match: { datafeeds.0.datafeed_id: "old-cluster-datafeed-with-aggs"} @@ -131,6 +135,8 @@ setup: - is_false: datafeeds.0.node - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' ml.get_datafeeds: datafeed_id: mixed-cluster-datafeed-with-aggs - match: { datafeeds.0.datafeed_id: "mixed-cluster-datafeed-with-aggs"} @@ -151,6 +157,8 @@ setup: job_id: old-cluster-datafeed-job-with-aggs - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' ml.start_datafeed: datafeed_id: old-cluster-datafeed-with-aggs start: 0 @@ -177,6 +185,8 @@ setup: job_id: mixed-cluster-datafeed-job-with-aggs - do: + warnings: + - '[interval] on [date_histogram] is deprecated, use [fixed_interval] or [calendar_interval] in the future.' ml.start_datafeed: datafeed_id: mixed-cluster-datafeed-with-aggs start: 0 From be515d7ce0db7d9143bbe8dbc799332d39c64009 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 20 May 2019 11:35:31 -0700 Subject: [PATCH 08/25] Validate non-secure settings are not in keystore (#42209) Secure settings currently error if they exist inside elasticsearch.yml. This commit adds validation that non-secure settings do not exist inside the keystore. closes #41831 --- .../java/org/elasticsearch/common/settings/Setting.java | 5 +++++ .../org/elasticsearch/common/settings/SettingTests.java | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 9c3762f857e..11a24052266 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -467,6 +467,11 @@ public class Setting implements ToXContentObject { * @return the raw string representation of the setting value */ String innerGetRaw(final Settings settings) { + SecureSettings secureSettings = settings.getSecureSettings(); + if (secureSettings != null && secureSettings.getSettingNames().contains(getKey())) { + throw new IllegalArgumentException("Setting [" + getKey() + "] is a non-secure setting" + + " and must be stored inside elasticsearch.yml, but was found inside the Elasticsearch keystore"); + } return settings.get(getKey(), defaultValue.apply(settings)); } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 220392a952c..b2f73db90f7 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -964,4 +964,13 @@ public class SettingTests extends ESTestCase { assertEquals("", value); } + public void testNonSecureSettingInKeystore() { + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("foo", "bar"); + final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); + Setting setting = Setting.simpleString("foo", Property.NodeScope); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> setting.get(settings)); + assertThat(e.getMessage(), containsString("must be stored inside elasticsearch.yml")); + } + } From 87bff89500b93fe46fcf088c7e4238c06cac73cb Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 20 May 2019 14:39:17 -0400 Subject: [PATCH 09/25] 7.1.0 release notes forward port (#42252) Forward port of #42208 --- docs/reference/migration/migrate_7_1.asciidoc | 2 - docs/reference/release-notes.asciidoc | 4 +- .../{7.0.0.asciidoc => 7.0.asciidoc} | 0 docs/reference/release-notes/7.1.0.asciidoc | 52 ------------------- docs/reference/release-notes/7.1.asciidoc | 45 ++++++++++++++++ .../release-notes/highlights-7.1.0.asciidoc | 36 +++++++++++-- 6 files changed, 78 insertions(+), 61 deletions(-) rename docs/reference/release-notes/{7.0.0.asciidoc => 7.0.asciidoc} (100%) delete mode 100644 docs/reference/release-notes/7.1.0.asciidoc create mode 100644 docs/reference/release-notes/7.1.asciidoc diff --git a/docs/reference/migration/migrate_7_1.asciidoc b/docs/reference/migration/migrate_7_1.asciidoc index 57b63ffb77e..8d77085e5da 100644 --- a/docs/reference/migration/migrate_7_1.asciidoc +++ b/docs/reference/migration/migrate_7_1.asciidoc @@ -9,8 +9,6 @@ your application to Elasticsearch 7.1. See also <> and <>. -coming[7.1.0] - //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index ab5dc60b019..b912d7b69f1 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -16,8 +16,8 @@ This section summarizes the changes in each release. -- -include::release-notes/7.1.0.asciidoc[] -include::release-notes/7.0.0.asciidoc[] +include::release-notes/7.1.asciidoc[] +include::release-notes/7.0.asciidoc[] include::release-notes/7.0.0-rc2.asciidoc[] include::release-notes/7.0.0-rc1.asciidoc[] include::release-notes/7.0.0-beta1.asciidoc[] diff --git a/docs/reference/release-notes/7.0.0.asciidoc b/docs/reference/release-notes/7.0.asciidoc similarity index 100% rename from docs/reference/release-notes/7.0.0.asciidoc rename to docs/reference/release-notes/7.0.asciidoc diff --git a/docs/reference/release-notes/7.1.0.asciidoc b/docs/reference/release-notes/7.1.0.asciidoc deleted file mode 100644 index 8ab37f875c2..00000000000 --- a/docs/reference/release-notes/7.1.0.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -//// -// To add a release, copy and paste the following text, uncomment the relevant -// sections, and add a link to the new section in the list of releases in -// ../release-notes.asciidoc. Note that release subheads must be floated and -// sections cannot be empty. -// TEMPLATE - -// [[release-notes-n.n.n]] -// == {es} version n.n.n - -// coming[n.n.n] - -// Also see <>. - -// [float] -// [[breaking-n.n.n]] -// === Breaking Changes - -// [float] -// [[breaking-java-n.n.n]] -// === Breaking Java Changes - -// [float] -// [[deprecation-n.n.n]] -// === Deprecations - -// [float] -// [[feature-n.n.n]] -// === New Features - -// [float] -// [[enhancement-n.n.n]] -// === Enhancements - -// [float] -// [[bug-n.n.n]] -// === Bug Fixes - -// [float] -// [[regression-n.n.n]] -// === Regressions - -// [float] -// === Known Issues -//// - -[[release-notes-7.1.0]] -== {es} version 7.1.0 - -Also see <>. - -coming[7.1.0] \ No newline at end of file diff --git a/docs/reference/release-notes/7.1.asciidoc b/docs/reference/release-notes/7.1.asciidoc new file mode 100644 index 00000000000..63d9f61b404 --- /dev/null +++ b/docs/reference/release-notes/7.1.asciidoc @@ -0,0 +1,45 @@ +[[release-notes-7.1.0]] +== {es} version 7.1.0 + +Also see <>. + +[[enhancement-7.1.0]] +[float] +=== Enhancements + +Security:: +* Moved some security features to basic. See <> + +Authentication:: +* Log warning when unlicensed realms are skipped {pull}41778[#41778] + +Infra/Settings:: +* Drop distinction in entries for keystore {pull}41701[#41701] + + +[[bug-7.1.0]] +[float] +=== Bug fixes + +Cluster Coordination:: +* Handle serialization exceptions during publication {pull}41781[#41781] (issue: {issue}41090[#41090]) + +Infra/Core:: +* Fix fractional seconds for strict_date_optional_time {pull}41871[#41871] (issue: {issue}41633[#41633]) + +Network:: +* Enforce transport TLS on Basic with Security {pull}42150[#42150] + +Reindex:: +* Allow reindexing into write alias {pull}41677[#41677] (issue: {issue}41667[#41667]) + +SQL:: +* SQL: Fix issue regarding INTERVAL * number {pull}42014[#42014] (issue: {issue}41239[#41239]) +* SQL: Remove CircuitBreaker from parser {pull}41835[#41835] (issue: {issue}41471[#41471]) + +Search:: +* Fix IAE on cross_fields query introduced in 7.0.1 {pull}41938[#41938] (issues: {issue}41125[#41125], {issue}41934[#41934]) + + + + diff --git a/docs/reference/release-notes/highlights-7.1.0.asciidoc b/docs/reference/release-notes/highlights-7.1.0.asciidoc index 91576e17cd1..26132c1daf4 100644 --- a/docs/reference/release-notes/highlights-7.1.0.asciidoc +++ b/docs/reference/release-notes/highlights-7.1.0.asciidoc @@ -4,11 +4,37 @@ 7.1.0 ++++ -coming[7.1.0] +See also <>. -//NOTE: The notable-highlights tagged regions are re-used in the -//Installation and Upgrade Guide +//tag::notable-highlights[] +[float] +==== TLS is now licensed under the Elastic Basic license -// tag::notable-highlights[] +Transport Layer Security (TLS), commonly referred to as SSL, is now +licensed under the free-of-charge Elastic Basic license. Previously, this security feature +required a paid Gold-tier subscription. With the default distribution, +you can now encrypt all Elasticsearch communication, within a cluster and across remotes +clusters. Download https://www.elastic.co/downloads/elasticsearch[Elasticsearch], +https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-tls.html[configure TLS], +and run your cluster in production, knowing all Elasticsearch communication is safely encrypted. +For details, see https://www.elastic.co/subscriptions +//end::notable-highlights[] -// end::notable-highlights[] +//tag::notable-highlights[] +[float] +==== RBAC is now licensed under the Elastic Basic license + +RBAC (Role Based Access Control) is now licenced under the free-of-charge Elastic Basic licence. +Previously, this security feature required a paid Gold-tier subscription. +With the default distribution you can take advantage of RBAC by configuring users, groups, roles +and permissions for any user from the +https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-file-realm.html[file realm] +or the https://www.elastic.co/guide/en/elasticsearch/reference/7.1/configuring-native-realm.html[native realm] +. Download https://www.elastic.co/downloads/elasticsearch[Elasticsearch], +https://www.elastic.co/guide/en/elastic-stack-overview/7.1/authorization.html[configure RBAC], +and run your cluster in production, knowing your private data stays private. +Note that our advanced security features, such as single sign-on and Active Directory/LDAP +authentication to field-level and document-level security, remain paid features. +For details, see https://www.elastic.co/subscriptions + +//end::notable-highlights[] From df8fef3c1ad2eb1ebc6aef3f458f6c5454ccdc68 Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 20 May 2019 15:32:44 -0400 Subject: [PATCH 10/25] fix assumption that 6.7 is last 6.x release (#42255) --- docs/reference/upgrade.asciidoc | 6 +++--- docs/reference/upgrade/cluster_restart.asciidoc | 4 ++-- docs/reference/upgrade/rolling_upgrade.asciidoc | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/reference/upgrade.asciidoc b/docs/reference/upgrade.asciidoc index e5e447aff75..b28abe7c948 100644 --- a/docs/reference/upgrade.asciidoc +++ b/docs/reference/upgrade.asciidoc @@ -7,8 +7,8 @@ process so upgrading does not interrupt service. Rolling upgrades are supported: * Between minor versions -* From 5.6 to 6.7 -* From 6.7 to {version} +* From 5.6 to 6.8 +* From 6.8 to {version} {es} can read indices created in the previous major version. If you have indices created in 5.x or before, you must reindex or delete them @@ -21,7 +21,7 @@ When upgrading to a new version of {es}, you need to upgrade each of the products in your Elastic Stack. For more information, see the {stack-ref}/upgrading-elastic-stack.html[Elastic Stack Installation and Upgrade Guide]. -To upgrade directly to {version} from 6.6 or earlier, you must shut down the +To upgrade directly to {version} from 6.7 or earlier, you must shut down the cluster, install {version}, and restart. For more information, see <>. diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 1865f005dcc..50e70c08ded 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -1,11 +1,11 @@ [[restart-upgrade]] == Full cluster restart upgrade -To upgrade directly to {es} {version} from versions 6.0-6.6, you must shut down +To upgrade directly to {es} {version} from versions 6.0-6.7, you must shut down all nodes in the cluster, upgrade each node to {version}, and restart the cluster. NOTE: If you are running a version prior to 6.0, -https://www.elastic.co/guide/en/elastic-stack/6.7/upgrading-elastic-stack.html[upgrade to 6.7] +https://www.elastic.co/guide/en/elastic-stack/6.8/upgrading-elastic-stack.html[upgrade to 6.8] and reindex your old indices or bring up a new {version} cluster and <>. diff --git a/docs/reference/upgrade/rolling_upgrade.asciidoc b/docs/reference/upgrade/rolling_upgrade.asciidoc index 789851ac7cf..f1a7e2da58b 100644 --- a/docs/reference/upgrade/rolling_upgrade.asciidoc +++ b/docs/reference/upgrade/rolling_upgrade.asciidoc @@ -10,13 +10,13 @@ running the older version. Rolling upgrades are supported: * Between minor versions -* https://www.elastic.co/guide/en/elastic-stack/6.7/upgrading-elastic-stack.html[From 5.6 to 6.7] -* From 6.7 to {version} +* https://www.elastic.co/guide/en/elastic-stack/6.8/upgrading-elastic-stack.html[From 5.6 to 6.8] +* From 6.8 to {version} -Upgrading directly to {version} from 6.6 or earlier requires a +Upgrading directly to {version} from 6.7 or earlier requires a <>. -To perform a rolling upgrade from 6.7 to {version}: +To perform a rolling upgrade from 6.8 to {version}: . *Disable shard allocation*. + From b0a25c3170d61e1cc2b2e901caf102b83b88845a Mon Sep 17 00:00:00 2001 From: Jake Landis Date: Mon, 20 May 2019 17:58:24 -0500 Subject: [PATCH 11/25] add 7.1.1 and 6.8.1 versions (#42251) --- server/src/main/java/org/elasticsearch/Version.java | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 7b1e9f425e3..b0bfe9a44b3 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -128,10 +128,10 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_7_1 = new Version(V_6_7_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_7_2_ID = 6070299; public static final Version V_6_7_2 = new Version(V_6_7_2_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); - public static final int V_6_7_3_ID = 6070399; - public static final Version V_6_7_3 = new Version(V_6_7_3_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_6_8_0_ID = 6080099; public static final Version V_6_8_0 = new Version(V_6_8_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); + public static final int V_6_8_1_ID = 6080199; + public static final Version V_6_8_1 = new Version(V_6_8_1_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_0_1_ID = 7000199; @@ -140,6 +140,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_0_2 = new Version(V_7_0_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_7_1_1_ID = 7010199; + public static final Version V_7_1_1 = new Version(V_7_1_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_2_0_ID = 7020099; public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final Version CURRENT = V_7_2_0; @@ -157,6 +159,8 @@ public class Version implements Comparable, ToXContentFragment { switch (id) { case V_7_2_0_ID: return V_7_2_0; + case V_7_1_1_ID: + return V_7_1_1; case V_7_1_0_ID: return V_7_1_0; case V_7_0_2_ID: @@ -165,10 +169,10 @@ public class Version implements Comparable, ToXContentFragment { return V_7_0_1; case V_7_0_0_ID: return V_7_0_0; + case V_6_8_1_ID: + return V_6_8_1; case V_6_8_0_ID: return V_6_8_0; - case V_6_7_3_ID: - return V_6_7_3; case V_6_7_1_ID: return V_6_7_1; case V_6_7_2_ID: From ec63160243e1b4a888d567fe4efd6dff9717a7c1 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 20 May 2019 09:55:35 -0400 Subject: [PATCH 12/25] Fix max boundary for rollups job that use a delay (#42158) Rollup jobs can define how long they should wait before rolling up new documents. However if the delay is smaller or if it's not a multiple of the rollup interval the job can create incomplete buckets because the max boundary for a job is computed from the time when the job started rounded to the interval minus the delay. This change fixes this computation by applying the delay substraction before the rounding in order to ensure that we never create a boundary that falls in a middle of a bucket. --- .../xpack/rollup/job/RollupIndexer.java | 13 ++--- .../job/RollupIndexerIndexingTests.java | 52 +++++++++++++++++++ 2 files changed, 57 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index 5a16be1456a..83ec16e85ff 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -110,15 +110,12 @@ public abstract class RollupIndexer extends AsyncTwoPhaseIndexer listener) { try { - // this is needed to exclude buckets that can still receive new documents. + // this is needed to exclude buckets that can still receive new documents DateHistogramGroupConfig dateHisto = job.getConfig().getGroupConfig().getDateHistogram(); - long rounded = dateHisto.createRounding().round(now); - if (dateHisto.getDelay() != null) { - // if the job has a delay we filter all documents that appear before it. - maxBoundary = rounded - TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis(); - } else { - maxBoundary = rounded; - } + // if the job has a delay we filter all documents that appear before it + long delay = dateHisto.getDelay() != null ? + TimeValue.parseTimeValue(dateHisto.getDelay().toString(), "").millis() : 0; + maxBoundary = dateHisto.createRounding().round(now - delay); listener.onResponse(null); } catch (Exception e) { listener.onFailure(e); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index b0b6dc83337..f2797413c80 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -328,6 +328,58 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { }); } + public void testSimpleDateHistoWithOverlappingDelay() throws Exception { + String rollupIndex = randomAlphaOfLengthBetween(5, 10); + String field = "the_histo"; + DateHistogramGroupConfig dateHistoConfig = + new DateHistogramGroupConfig(field, new DateHistogramInterval("1h"), new DateHistogramInterval("15m"), null); + RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); + final List> dataset = new ArrayList<>(); + long now = asLong("2015-04-01T10:30:00.000Z"); + dataset.addAll( + Arrays.asList( + asMap("the_histo", now - TimeValue.timeValueMinutes(135).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(120).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(105).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(90).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(75).getMillis()), + asMap("the_histo", now - TimeValue.timeValueHours(1).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(45).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(30).getMillis()), + asMap("the_histo", now - TimeValue.timeValueMinutes(15).getMillis()), + asMap("the_histo", now) + ) + ); + final Rounding rounding = dateHistoConfig.createRounding(); + executeTestCase(dataset, job, now, (resp) -> { + assertThat(resp.size(), equalTo(2)); + IndexRequest request = resp.get(0); + assertThat(request.index(), equalTo(rollupIndex)); + assertThat(request.sourceAsMap(), equalTo( + asMap( + "_rollup.version", newIDScheme ? 2 : 1, + "the_histo.date_histogram.timestamp", rounding.round(now - TimeValue.timeValueHours(2).getMillis()), + "the_histo.date_histogram.interval", "1h", + "the_histo.date_histogram._count", 3, + "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), + "_rollup.id", job.getId() + ) + )); + request = resp.get(1); + assertThat(request.index(), equalTo(rollupIndex)); + assertThat(request.sourceAsMap(), equalTo( + asMap( + "_rollup.version", newIDScheme ? 2 : 1, + "the_histo.date_histogram.timestamp", rounding.round(now - TimeValue.timeValueHours(1).getMillis()), + "the_histo.date_histogram.interval", "1h", + "the_histo.date_histogram._count", 4, + "the_histo.date_histogram.time_zone", DateTimeZone.UTC.toString(), + "_rollup.id", job.getId() + ) + )); + }); + } + public void testSimpleDateHistoWithTimeZone() throws Exception { final List> dataset = new ArrayList<>(); long now = asLong("2015-04-01T10:00:00.000Z"); From 7abeaba8bb374130283978b7505c1943ef580ad6 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 21 May 2019 07:52:01 +0100 Subject: [PATCH 13/25] Prevent in-place downgrades and invalid upgrades (#41731) Downgrading an Elasticsearch node to an earlier version is unsupported, because we do not make any attempt to guarantee that a node can read any of the on-disk data written by a future version. Yet today we do not actively prevent downgrades, and sometimes users will attempt to roll back a failed upgrade with an in-place downgrade and get into an unrecoverable state. This change adds the current version of the node to the node metadata file, and checks the version found in this file against the current version at startup. If the node cannot be sure of its ability to read the on-disk data then it refuses to start, preserving any on-disk data in its upgraded state. This change also adds a command-line tool to overwrite the node metadata file without performing any version checks, to unsafely bypass these checks and recover the historical and lenient behaviour. --- docs/reference/commands/node-tool.asciidoc | 63 ++++++- .../ElasticsearchNodeCommand.java | 19 ++- .../cluster/coordination/NodeToolCli.java | 2 + .../elasticsearch/env/NodeEnvironment.java | 11 +- .../org/elasticsearch/env/NodeMetaData.java | 72 ++++++-- .../env/NodeRepurposeCommand.java | 12 -- .../env/OverrideNodeVersionCommand.java | 103 ++++++++++++ .../gateway/MetaDataStateFormat.java | 4 +- .../elasticsearch/env/NodeEnvironmentIT.java | 37 +++++ .../elasticsearch/env/NodeMetaDataTests.java | 118 +++++++++++++ .../env/OverrideNodeVersionCommandTests.java | 155 ++++++++++++++++++ .../env/testReadsFormatWithoutVersion.binary | Bin 0 -> 71 bytes 12 files changed, 556 insertions(+), 40 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java create mode 100644 server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java create mode 100644 server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java create mode 100644 server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary diff --git a/docs/reference/commands/node-tool.asciidoc b/docs/reference/commands/node-tool.asciidoc index f070d11aa8f..ed810a4dac0 100644 --- a/docs/reference/commands/node-tool.asciidoc +++ b/docs/reference/commands/node-tool.asciidoc @@ -4,14 +4,15 @@ The `elasticsearch-node` command enables you to perform certain unsafe operations on a node that are only possible while it is shut down. This command allows you to adjust the <> of a node and may be able to -recover some data after a disaster. +recover some data after a disaster or start a node even if it is incompatible +with the data on disk. [float] === Synopsis [source,shell] -------------------------------------------------- -bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster +bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster|override-version [--ordinal ] [-E ] [-h, --help] ([-s, --silent] | [-v, --verbose]) -------------------------------------------------- @@ -19,7 +20,7 @@ bin/elasticsearch-node repurpose|unsafe-bootstrap|detach-cluster [float] === Description -This tool has three modes: +This tool has four modes: * `elasticsearch-node repurpose` can be used to delete unwanted data from a node if it used to be a <> or a @@ -36,6 +37,11 @@ This tool has three modes: cluster bootstrapping was not possible, it also enables you to move nodes into a brand-new cluster. +* `elasticsearch-node override-version` enables you to start up a node + even if the data in the data path was written by an incompatible version of + {es}. This may sometimes allow you to downgrade to an earlier version of + {es}. + [[node-tool-repurpose]] [float] ==== Changing the role of a node @@ -109,6 +115,25 @@ way forward that does not risk data loss, but it may be possible to use the `elasticsearch-node` tool to construct a new cluster that contains some of the data from the failed cluster. +[[node-tool-override-version]] +[float] +==== Bypassing version checks + +The data that {es} writes to disk is designed to be read by the current version +and a limited set of future versions. It cannot generally be read by older +versions, nor by versions that are more than one major version newer. The data +stored on disk includes the version of the node that wrote it, and {es} checks +that it is compatible with this version when starting up. + +In rare circumstances it may be desirable to bypass this check and start up an +{es} node using data that was written by an incompatible version. This may not +work if the format of the stored data has changed, and it is a risky process +because it is possible for the format to change in ways that {es} may +misinterpret, silently leading to data loss. + +To bypass this check, you can use the `elasticsearch-node override-version` +tool to overwrite the version number stored in the data path with the current +version, causing {es} to believe that it is compatible with the on-disk data. [[node-tool-unsafe-bootstrap]] [float] @@ -262,6 +287,9 @@ one-node cluster. `detach-cluster`:: Specifies to unsafely detach this node from its cluster so it can join a different cluster. +`override-version`:: Overwrites the version number stored in the data path so +that a node can start despite being incompatible with the on-disk data. + `--ordinal `:: If there is <> then this specifies which node to target. Defaults to `0`, meaning to use the first node in the data path. @@ -423,3 +451,32 @@ Do you want to proceed? Confirm [y/N] y Node was successfully detached from the cluster ---- + +[float] +==== Bypassing version checks + +Run the `elasticsearch-node override-version` command to overwrite the version +stored in the data path so that a node can start despite being incompatible +with the data stored in the data path: + +[source, txt] +---- +node$ ./bin/elasticsearch-node override-version + + WARNING: Elasticsearch MUST be stopped before running this tool. + +This data path was last written by Elasticsearch version [x.x.x] and may no +longer be compatible with Elasticsearch version [y.y.y]. This tool will bypass +this compatibility check, allowing a version [y.y.y] node to start on this data +path, but a version [y.y.y] node may not be able to read this data or may read +it incorrectly leading to data loss. + +You should not use this tool. Instead, continue to use a version [x.x.x] node +on this data path. If necessary, you can use reindex-from-remote to copy the +data from here into an older cluster. + +Do you want to proceed? + +Confirm [y/N] y +Successfully overwrote this node's metadata to bypass its version compatibility checks. +---- diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 2ce9d520824..fbfcc4672bb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -44,7 +44,7 @@ import java.util.Objects; public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { private static final Logger logger = LogManager.getLogger(ElasticsearchNodeCommand.class); protected final NamedXContentRegistry namedXContentRegistry; - static final String DELIMITER = "------------------------------------------------------------------------\n"; + protected static final String DELIMITER = "------------------------------------------------------------------------\n"; static final String STOP_WARNING_MSG = DELIMITER + @@ -81,9 +81,8 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { throw new ElasticsearchException(NO_NODE_FOLDER_FOUND_MSG); } processNodePaths(terminal, dataPaths, env); - } catch (LockObtainFailedException ex) { - throw new ElasticsearchException( - FAILED_TO_OBTAIN_NODE_LOCK_MSG + " [" + ex.getMessage() + "]"); + } catch (LockObtainFailedException e) { + throw new ElasticsearchException(FAILED_TO_OBTAIN_NODE_LOCK_MSG, e); } } @@ -166,6 +165,18 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { } } + protected NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) { + return Arrays.stream(dataPaths).map(ElasticsearchNodeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new); + } + + private static NodeEnvironment.NodePath createNodePath(Path path) { + try { + return new NodeEnvironment.NodePath(path); + } catch (IOException e) { + throw new ElasticsearchException("Unable to investigate path [" + path + "]", e); + } + } + //package-private for testing OptionParser getParser() { return parser; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java index d6bd22bcd76..ff054e71eee 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeToolCli.java @@ -22,6 +22,7 @@ import org.elasticsearch.cli.CommandLoggingConfigurator; import org.elasticsearch.cli.MultiCommand; import org.elasticsearch.cli.Terminal; import org.elasticsearch.env.NodeRepurposeCommand; +import org.elasticsearch.env.OverrideNodeVersionCommand; // NodeToolCli does not extend LoggingAwareCommand, because LoggingAwareCommand performs logging initialization // after LoggingAwareCommand instance is constructed. @@ -39,6 +40,7 @@ public class NodeToolCli extends MultiCommand { subcommands.put("repurpose", new NodeRepurposeCommand()); subcommands.put("unsafe-bootstrap", new UnsafeBootstrapMasterCommand()); subcommands.put("detach-cluster", new DetachClusterCommand()); + subcommands.put("override-version", new OverrideNodeVersionCommand()); } public static void main(String[] args) throws Exception { diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index f7e6f8e949b..cc36d734213 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -31,6 +31,7 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.CheckedFunction; @@ -250,7 +251,7 @@ public final class NodeEnvironment implements Closeable { sharedDataPath = null; locks = null; nodeLockId = -1; - nodeMetaData = new NodeMetaData(generateNodeId(settings)); + nodeMetaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); return; } boolean success = false; @@ -395,7 +396,6 @@ public final class NodeEnvironment implements Closeable { logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops); } - /** * scans the node paths and loads existing metaData file. If not found a new meta data will be generated * and persisted into the nodePaths @@ -405,10 +405,15 @@ public final class NodeEnvironment implements Closeable { final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new); NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths); if (metaData == null) { - metaData = new NodeMetaData(generateNodeId(settings)); + metaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT); + } else { + metaData = metaData.upgradeToCurrentVersion(); } + // we write again to make sure all paths have the latest state file + assert metaData.nodeVersion().equals(Version.CURRENT) : metaData.nodeVersion() + " != " + Version.CURRENT; NodeMetaData.FORMAT.writeAndCleanup(metaData, paths); + return metaData; } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetaData.java b/server/src/main/java/org/elasticsearch/env/NodeMetaData.java index dbea3164c8a..f9deba8f6c3 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetaData.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetaData.java @@ -19,6 +19,7 @@ package org.elasticsearch.env; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -31,66 +32,104 @@ import java.io.OutputStream; import java.util.Objects; /** - * Metadata associated with this node. Currently only contains the unique uuid describing this node. + * Metadata associated with this node: its persistent node ID and its version. * The metadata is persisted in the data folder of this node and is reused across restarts. */ public final class NodeMetaData { private static final String NODE_ID_KEY = "node_id"; + private static final String NODE_VERSION_KEY = "node_version"; private final String nodeId; - public NodeMetaData(final String nodeId) { + private final Version nodeVersion; + + public NodeMetaData(final String nodeId, final Version nodeVersion) { this.nodeId = Objects.requireNonNull(nodeId); + this.nodeVersion = Objects.requireNonNull(nodeVersion); } @Override public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; NodeMetaData that = (NodeMetaData) o; - - return Objects.equals(this.nodeId, that.nodeId); + return nodeId.equals(that.nodeId) && + nodeVersion.equals(that.nodeVersion); } @Override public int hashCode() { - return this.nodeId.hashCode(); + return Objects.hash(nodeId, nodeVersion); } @Override public String toString() { - return "node_id [" + nodeId + "]"; + return "NodeMetaData{" + + "nodeId='" + nodeId + '\'' + + ", nodeVersion=" + nodeVersion + + '}'; } private static ObjectParser PARSER = new ObjectParser<>("node_meta_data", Builder::new); static { PARSER.declareString(Builder::setNodeId, new ParseField(NODE_ID_KEY)); + PARSER.declareInt(Builder::setNodeVersionId, new ParseField(NODE_VERSION_KEY)); } public String nodeId() { return nodeId; } + public Version nodeVersion() { + return nodeVersion; + } + + public NodeMetaData upgradeToCurrentVersion() { + if (nodeVersion.equals(Version.V_EMPTY)) { + assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + return new NodeMetaData(nodeId, Version.CURRENT); + } + + if (nodeVersion.before(Version.CURRENT.minimumIndexCompatibilityVersion())) { + throw new IllegalStateException( + "cannot upgrade a node from version [" + nodeVersion + "] directly to version [" + Version.CURRENT + "]"); + } + + if (nodeVersion.after(Version.CURRENT)) { + throw new IllegalStateException( + "cannot downgrade a node from version [" + nodeVersion + "] to version [" + Version.CURRENT + "]"); + } + + return nodeVersion.equals(Version.CURRENT) ? this : new NodeMetaData(nodeId, Version.CURRENT); + } + private static class Builder { String nodeId; + Version nodeVersion; public void setNodeId(String nodeId) { this.nodeId = nodeId; } + public void setNodeVersionId(int nodeVersionId) { + this.nodeVersion = Version.fromId(nodeVersionId); + } + public NodeMetaData build() { - return new NodeMetaData(nodeId); + final Version nodeVersion; + if (this.nodeVersion == null) { + assert Version.CURRENT.major <= Version.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + nodeVersion = Version.V_EMPTY; + } else { + nodeVersion = this.nodeVersion; + } + + return new NodeMetaData(nodeId, nodeVersion); } } - public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat("node-") { @Override @@ -103,10 +142,11 @@ public final class NodeMetaData { @Override public void toXContent(XContentBuilder builder, NodeMetaData nodeMetaData) throws IOException { builder.field(NODE_ID_KEY, nodeMetaData.nodeId); + builder.field(NODE_VERSION_KEY, nodeMetaData.nodeVersion.id); } @Override - public NodeMetaData fromXContent(XContentParser parser) throws IOException { + public NodeMetaData fromXContent(XContentParser parser) { return PARSER.apply(parser, null).build(); } }; diff --git a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java index 7331d8528fc..cfe3ab6b3fd 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java +++ b/server/src/main/java/org/elasticsearch/env/NodeRepurposeCommand.java @@ -172,10 +172,6 @@ public class NodeRepurposeCommand extends ElasticsearchNodeCommand { } } - private NodeEnvironment.NodePath[] toNodePaths(Path[] dataPaths) { - return Arrays.stream(dataPaths).map(NodeRepurposeCommand::createNodePath).toArray(NodeEnvironment.NodePath[]::new); - } - private Set indexUUIDsFor(Set indexPaths) { return indexPaths.stream().map(Path::getFileName).map(Path::toString).collect(Collectors.toSet()); } @@ -226,14 +222,6 @@ public class NodeRepurposeCommand extends ElasticsearchNodeCommand { return Arrays.stream(paths).flatMap(Collection::stream).map(Path::getParent).collect(Collectors.toSet()); } - private static NodeEnvironment.NodePath createNodePath(Path path) { - try { - return new NodeEnvironment.NodePath(path); - } catch (IOException e) { - throw new ElasticsearchException("Unable to investigate path: " + path + ": " + e.getMessage()); - } - } - //package-private for testing OptionParser getParser() { return parser; diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java new file mode 100644 index 00000000000..a46e185a253 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.env; + +import joptsimple.OptionParser; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cluster.coordination.ElasticsearchNodeCommand; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; + +public class OverrideNodeVersionCommand extends ElasticsearchNodeCommand { + private static final Logger logger = LogManager.getLogger(OverrideNodeVersionCommand.class); + + private static final String TOO_NEW_MESSAGE = + DELIMITER + + "\n" + + "This data path was last written by Elasticsearch version [V_NEW] and may no\n" + + "longer be compatible with Elasticsearch version [V_CUR]. This tool will bypass\n" + + "this compatibility check, allowing a version [V_CUR] node to start on this data\n" + + "path, but a version [V_CUR] node may not be able to read this data or may read\n" + + "it incorrectly leading to data loss.\n" + + "\n" + + "You should not use this tool. Instead, continue to use a version [V_NEW] node\n" + + "on this data path. If necessary, you can use reindex-from-remote to copy the\n" + + "data from here into an older cluster.\n" + + "\n" + + "Do you want to proceed?\n"; + + private static final String TOO_OLD_MESSAGE = + DELIMITER + + "\n" + + "This data path was last written by Elasticsearch version [V_OLD] which may be\n" + + "too old to be readable by Elasticsearch version [V_CUR]. This tool will bypass\n" + + "this compatibility check, allowing a version [V_CUR] node to start on this data\n" + + "path, but this version [V_CUR] node may not be able to read this data or may\n" + + "read it incorrectly leading to data loss.\n" + + "\n" + + "You should not use this tool. Instead, upgrade this data path from [V_OLD] to\n" + + "[V_CUR] using one or more intermediate versions of Elasticsearch.\n" + + "\n" + + "Do you want to proceed?\n"; + + static final String NO_METADATA_MESSAGE = "no node metadata found, so there is no version to override"; + static final String SUCCESS_MESSAGE = "Successfully overwrote this node's metadata to bypass its version compatibility checks."; + + public OverrideNodeVersionCommand() { + super("Overwrite the version stored in this node's data path with [" + Version.CURRENT + + "] to bypass the version compatibility checks"); + } + + @Override + protected void processNodePaths(Terminal terminal, Path[] dataPaths, Environment env) throws IOException { + final Path[] nodePaths = Arrays.stream(toNodePaths(dataPaths)).map(p -> p.path).toArray(Path[]::new); + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodePaths); + if (nodeMetaData == null) { + throw new ElasticsearchException(NO_METADATA_MESSAGE); + } + + try { + nodeMetaData.upgradeToCurrentVersion(); + throw new ElasticsearchException("found [" + nodeMetaData + "] which is compatible with current version [" + Version.CURRENT + + "], so there is no need to override the version checks"); + } catch (IllegalStateException e) { + // ok, means the version change is not supported + } + + confirm(terminal, (nodeMetaData.nodeVersion().before(Version.CURRENT) ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE) + .replace("V_OLD", nodeMetaData.nodeVersion().toString()) + .replace("V_NEW", nodeMetaData.nodeVersion().toString()) + .replace("V_CUR", Version.CURRENT.toString())); + + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeMetaData.nodeId(), Version.CURRENT), nodePaths); + + terminal.println(SUCCESS_MESSAGE); + } + + //package-private for testing + OptionParser getParser() { + return parser; + } +} diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 3f28fead294..d5dbfe82866 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -382,7 +382,7 @@ public abstract class MetaDataStateFormat { return files; } - private String getStateFileName(long generation) { + public String getStateFileName(long generation) { return prefix + generation + STATE_FILE_EXTENSION; } @@ -466,7 +466,7 @@ public abstract class MetaDataStateFormat { IOUtils.rm(stateDirectories); } - String getPrefix() { + public String getPrefix() { return prefix; } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java index 36f75c79a17..37e260a01d0 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -19,12 +19,18 @@ package org.elasticsearch.env; +import org.elasticsearch.Version; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.startsWith; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -86,4 +92,35 @@ public class NodeEnvironmentIT extends ESIntegTestCase { + Node.NODE_DATA_SETTING.getKey() + "=false, but has shard data")); } + + private IllegalStateException expectThrowsOnRestart(CheckedConsumer onNodeStopped) { + internalCluster().startNode(); + final Path[] dataPaths = internalCluster().getInstance(NodeEnvironment.class).nodeDataPaths(); + return expectThrows(IllegalStateException.class, + () -> internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + try { + onNodeStopped.accept(dataPaths); + } catch (Exception e) { + throw new AssertionError(e); + } + return Settings.EMPTY; + } + })); + } + + public void testFailsToStartIfDowngraded() { + final IllegalStateException illegalStateException = expectThrowsOnRestart(dataPaths -> + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), NodeMetaDataTests.tooNewVersion()), dataPaths)); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot downgrade a node from version ["), endsWith("] to version [" + Version.CURRENT + "]"))); + } + + public void testFailsToStartIfUpgradedTooFar() { + final IllegalStateException illegalStateException = expectThrowsOnRestart(dataPaths -> + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), NodeMetaDataTests.tooOldVersion()), dataPaths)); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); + } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java new file mode 100644 index 00000000000..59cf6247f96 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/NodeMetaDataTests.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.env; + +import org.elasticsearch.Version; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.gateway.MetaDataStateFormat; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.test.VersionUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.endsWith; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.startsWith; + +public class NodeMetaDataTests extends ESTestCase { + private Version randomVersion() { + // VersionUtils.randomVersion() only returns known versions, which are necessarily no later than Version.CURRENT; however we want + // also to consider our behaviour with all versions, so occasionally pick up a truly random version. + return rarely() ? Version.fromId(randomInt()) : VersionUtils.randomVersion(random()); + } + + public void testEqualsHashcodeSerialization() { + final Path tempDir = createTempDir(); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(new NodeMetaData(randomAlphaOfLength(10), randomVersion()), + nodeMetaData -> { + final long generation = NodeMetaData.FORMAT.writeAndCleanup(nodeMetaData, tempDir); + final Tuple nodeMetaDataLongTuple + = NodeMetaData.FORMAT.loadLatestStateWithGeneration(logger, xContentRegistry(), tempDir); + assertThat(nodeMetaDataLongTuple.v2(), equalTo(generation)); + return nodeMetaDataLongTuple.v1(); + }, nodeMetaData -> { + if (randomBoolean()) { + return new NodeMetaData(randomAlphaOfLength(21 - nodeMetaData.nodeId().length()), nodeMetaData.nodeVersion()); + } else { + return new NodeMetaData(nodeMetaData.nodeId(), randomValueOtherThan(nodeMetaData.nodeVersion(), this::randomVersion)); + } + }); + } + + public void testReadsFormatWithoutVersion() throws IOException { + // the behaviour tested here is only appropriate if the current version is compatible with versions 7 and earlier + assertTrue(Version.CURRENT.minimumIndexCompatibilityVersion().onOrBefore(Version.V_7_0_0)); + // when the current version is incompatible with version 7, the behaviour should change to reject files like the given resource + // which do not have the version field + + final Path tempDir = createTempDir(); + final Path stateDir = Files.createDirectory(tempDir.resolve(MetaDataStateFormat.STATE_DIR_NAME)); + final InputStream resource = this.getClass().getResourceAsStream("testReadsFormatWithoutVersion.binary"); + assertThat(resource, notNullValue()); + Files.copy(resource, stateDir.resolve(NodeMetaData.FORMAT.getStateFileName(between(0, Integer.MAX_VALUE)))); + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), tempDir); + assertThat(nodeMetaData.nodeId(), equalTo("y6VUVMSaStO4Tz-B5BxcOw")); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.V_EMPTY)); + } + + public void testUpgradesLegitimateVersions() { + final String nodeId = randomAlphaOfLength(10); + final NodeMetaData nodeMetaData = new NodeMetaData(nodeId, + randomValueOtherThanMany(v -> v.after(Version.CURRENT) || v.before(Version.CURRENT.minimumIndexCompatibilityVersion()), + this::randomVersion)).upgradeToCurrentVersion(); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + } + + public void testUpgradesMissingVersion() { + final String nodeId = randomAlphaOfLength(10); + final NodeMetaData nodeMetaData = new NodeMetaData(nodeId, Version.V_EMPTY).upgradeToCurrentVersion(); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + } + + public void testDoesNotUpgradeFutureVersion() { + final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> new NodeMetaData(randomAlphaOfLength(10), tooNewVersion()) + .upgradeToCurrentVersion()); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot downgrade a node from version ["), endsWith("] to version [" + Version.CURRENT + "]"))); + } + + public void testDoesNotUpgradeAncientVersion() { + final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, + () -> new NodeMetaData(randomAlphaOfLength(10), tooOldVersion()).upgradeToCurrentVersion()); + assertThat(illegalStateException.getMessage(), + allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]"))); + } + + public static Version tooNewVersion() { + return Version.fromId(between(Version.CURRENT.id + 1, 99999999)); + } + + public static Version tooOldVersion() { + return Version.fromId(between(1, Version.CURRENT.minimumIndexCompatibilityVersion().id - 1)); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java new file mode 100644 index 00000000000..704617c7b5e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.env; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cli.MockTerminal; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.WriteStateException; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.nio.file.Path; + +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class OverrideNodeVersionCommandTests extends ESTestCase { + + private Environment environment; + private Path[] nodePaths; + + @Before + public void createNodePaths() throws IOException { + final Settings settings = buildEnvSettings(Settings.EMPTY); + environment = TestEnvironment.newEnvironment(settings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(settings, environment)) { + nodePaths = nodeEnvironment.nodeDataPaths(); + } + } + + public void testFailsOnEmptyPath() { + final Path emptyPath = createTempDir(); + final MockTerminal mockTerminal = new MockTerminal(); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, new Path[]{emptyPath}, environment)); + assertThat(elasticsearchException.getMessage(), equalTo(OverrideNodeVersionCommand.NO_METADATA_MESSAGE)); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + } + + public void testFailsIfUnnecessary() throws WriteStateException { + final Version nodeVersion = Version.fromId(between(Version.CURRENT.minimumIndexCompatibilityVersion().id, Version.CURRENT.id)); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(randomAlphaOfLength(10), nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), allOf( + containsString("compatible with current version"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + } + + public void testWarnsIfTooOld() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooOldVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput("n\n"); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), equalTo("aborted by user")); + assertThat(mockTerminal.getOutput(), allOf( + containsString("too old"), + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(nodeVersion)); + } + + public void testWarnsIfTooNew() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no")); + final ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class, () -> + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment)); + assertThat(elasticsearchException.getMessage(), equalTo("aborted by user")); + assertThat(mockTerminal.getOutput(), allOf( + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(nodeVersion)); + } + + public void testOverwritesIfTooOld() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooOldVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("y", "Y")); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); + assertThat(mockTerminal.getOutput(), allOf( + containsString("too old"), + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()), + containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + } + + public void testOverwritesIfTooNew() throws Exception { + final String nodeId = randomAlphaOfLength(10); + final Version nodeVersion = NodeMetaDataTests.tooNewVersion(); + NodeMetaData.FORMAT.writeAndCleanup(new NodeMetaData(nodeId, nodeVersion), nodePaths); + final MockTerminal mockTerminal = new MockTerminal(); + mockTerminal.addTextInput(randomFrom("y", "Y")); + new OverrideNodeVersionCommand().processNodePaths(mockTerminal, nodePaths, environment); + assertThat(mockTerminal.getOutput(), allOf( + containsString("data loss"), + containsString("You should not use this tool"), + containsString(Version.CURRENT.toString()), + containsString(nodeVersion.toString()), + containsString(OverrideNodeVersionCommand.SUCCESS_MESSAGE))); + expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); + + final NodeMetaData nodeMetaData = NodeMetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodePaths); + assertThat(nodeMetaData.nodeId(), equalTo(nodeId)); + assertThat(nodeMetaData.nodeVersion(), equalTo(Version.CURRENT)); + } +} diff --git a/server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary b/server/src/test/resources/org/elasticsearch/env/testReadsFormatWithoutVersion.binary new file mode 100644 index 0000000000000000000000000000000000000000..3a8bb297e7449461f9193810654025a61ae891da GIT binary patch literal 71 zcmcD&o+Hj$T#{Il%D}+D2*OsHT&%y^^72zs<1BPLaKC~Or0u{ U{mXwJ(3t!Js2B_;><`@o0PKks=>Px# literal 0 HcmV?d00001 From 7b3a9c7033f9c4a348938f4c8caeb15cfe704c46 Mon Sep 17 00:00:00 2001 From: Tim Vernum Date: Tue, 21 May 2019 18:14:22 +1000 Subject: [PATCH 14/25] Do not refresh realm cache unless required (#42212) If there are no realms that depend on the native role mapping store, then changes should it should not perform any cache refresh. A refresh with an empty realm array will refresh all realms. This also fixes a spurious log warning that could occur if the role mapping store was notified that the security index was recovered before any realm were attached. Backport of: #42169 --- .../mapper/NativeRoleMappingStore.java | 10 +++- .../mapper/NativeRoleMappingStoreTests.java | 55 +++++++++++++------ 2 files changed, 45 insertions(+), 20 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index 1b6da7f68ca..bb98dddbe1d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.client.Client; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -329,7 +330,12 @@ public class NativeRoleMappingStore implements UserRoleMapper { } private void refreshRealms(ActionListener listener, Result result) { - String[] realmNames = this.realmsToRefresh.toArray(new String[realmsToRefresh.size()]); + if (realmsToRefresh.isEmpty()) { + listener.onResponse(result); + return; + } + + final String[] realmNames = this.realmsToRefresh.toArray(Strings.EMPTY_ARRAY); final SecurityClient securityClient = new SecurityClient(client); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, securityClient.prepareClearRealmCache().realms(realmNames).request(), @@ -340,7 +346,7 @@ public class NativeRoleMappingStore implements UserRoleMapper { listener.onResponse(result); }, ex -> { - logger.warn("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)); + logger.warn(new ParameterizedMessage("Failed to clear cache for realms [{}]", Arrays.toString(realmNames)), ex); listener.onFailure(ex); }), securityClient::clearRealmCache); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 6bb6e0c7b58..3cca6cc4fd3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -143,7 +143,7 @@ public class NativeRoleMappingStoreTests extends ESTestCase { public void testCacheClearOnIndexHealthChange() { final AtomicInteger numInvalidation = new AtomicInteger(0); - final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, true); int expectedInvalidation = 0; // existing to no longer present @@ -180,7 +180,7 @@ public class NativeRoleMappingStoreTests extends ESTestCase { public void testCacheClearOnIndexOutOfDateChange() { final AtomicInteger numInvalidation = new AtomicInteger(0); - final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, true); store.onSecurityIndexStateChange( new SecurityIndexManager.State(Instant.now(), false, true, true, null, concreteSecurityIndexName, null), @@ -193,40 +193,59 @@ public class NativeRoleMappingStoreTests extends ESTestCase { assertEquals(2, numInvalidation.get()); } - private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(AtomicInteger invalidationCounter) { + public void testCacheIsNotClearedIfNoRealmsAreAttached() { + final AtomicInteger numInvalidation = new AtomicInteger(0); + final NativeRoleMappingStore store = buildRoleMappingStoreForInvalidationTesting(numInvalidation, false); + + final SecurityIndexManager.State noIndexState = dummyState(null); + final SecurityIndexManager.State greenIndexState = dummyState(ClusterHealthStatus.GREEN); + store.onSecurityIndexStateChange(noIndexState, greenIndexState); + assertEquals(0, numInvalidation.get()); + } + + private NativeRoleMappingStore buildRoleMappingStoreForInvalidationTesting(AtomicInteger invalidationCounter, boolean attachRealm) { final Settings settings = Settings.builder().put("path.home", createTempDir()).build(); final ThreadPool threadPool = mock(ThreadPool.class); final ThreadContext threadContext = new ThreadContext(settings); when(threadPool.getThreadContext()).thenReturn(threadContext); + final String realmName = randomAlphaOfLengthBetween(4, 8); + final Client client = mock(Client.class); when(client.threadPool()).thenReturn(threadPool); when(client.settings()).thenReturn(settings); doAnswer(invocationOnMock -> { + assertThat(invocationOnMock.getArguments(), Matchers.arrayWithSize(3)); + final ClearRealmCacheRequest request = (ClearRealmCacheRequest) invocationOnMock.getArguments()[1]; + assertThat(request.realms(), Matchers.arrayContaining(realmName)); + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2]; invalidationCounter.incrementAndGet(); listener.onResponse(new ClearRealmCacheResponse(new ClusterName("cluster"), Collections.emptyList(), Collections.emptyList())); return null; }).when(client).execute(eq(ClearRealmCacheAction.INSTANCE), any(ClearRealmCacheRequest.class), any(ActionListener.class)); - final Environment env = TestEnvironment.newEnvironment(settings); - final RealmConfig realmConfig = new RealmConfig(new RealmConfig.RealmIdentifier("ldap", getTestName()), - settings, env, threadContext); - final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm(realmConfig, threadPool) { - @Override - protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { - listener.onResponse(AuthenticationResult.notHandled()); - } - - @Override - protected void doLookupUser(String username, ActionListener listener) { - listener.onResponse(null); - } - }; final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityIndexManager.class), mock(ScriptService.class)); - store.refreshRealmOnChange(mockRealm); + + if (attachRealm) { + final Environment env = TestEnvironment.newEnvironment(settings); + final RealmConfig.RealmIdentifier identifier = new RealmConfig.RealmIdentifier("ldap", realmName); + final RealmConfig realmConfig = new RealmConfig(identifier, settings, env, threadContext); + final CachingUsernamePasswordRealm mockRealm = new CachingUsernamePasswordRealm(realmConfig, threadPool) { + @Override + protected void doAuthenticate(UsernamePasswordToken token, ActionListener listener) { + listener.onResponse(AuthenticationResult.notHandled()); + } + + @Override + protected void doLookupUser(String username, ActionListener listener) { + listener.onResponse(null); + } + }; + store.refreshRealmOnChange(mockRealm); + } return store; } } From 24144aead2d4598711af51c979ed238933548201 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 21 May 2019 10:24:20 +0100 Subject: [PATCH 15/25] [ML] Complete the Data Frame task on stop (#41752) (#42063) Wait for indexer to stop then complete the persistent task on stop. If the wait_for_completion is true the request will not return until stopped. --- .../client/DataFrameTransformIT.java | 5 +- .../DataFrameTransformDocumentationIT.java | 3 +- .../DeleteDataFrameTransformAction.java | 87 ++---------- .../core/indexing/AsyncTwoPhaseIndexer.java | 24 +++- ...DataFrameTransformActionResponseTests.java | 22 --- .../indexing/AsyncTwoPhaseIndexerTests.java | 75 ++++++++-- .../integration/DataFrameIntegTestCase.java | 6 +- .../integration/DataFrameRestTestCase.java | 11 +- ...ansportDeleteDataFrameTransformAction.java | 98 ++++++------- ...portGetDataFrameTransformsStatsAction.java | 1 - .../TransportPutDataFrameTransformAction.java | 4 - ...TransportStopDataFrameTransformAction.java | 129 +++++++++++------- .../RestDeleteDataFrameTransformAction.java | 3 +- .../transforms/DataFrameTransformTask.java | 54 ++------ .../test/data_frame/transforms_start_stop.yml | 4 + 15 files changed, 249 insertions(+), 277 deletions(-) delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformActionResponseTests.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index f01db621bc2..1bd49154ee5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -141,7 +141,8 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { @After public void cleanUpTransforms() throws IOException { for (String transformId : transformsToClean) { - highLevelClient().dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(transformId), RequestOptions.DEFAULT); + highLevelClient().dataFrame().stopDataFrameTransform( + new StopDataFrameTransformRequest(transformId, Boolean.TRUE, null), RequestOptions.DEFAULT); } for (String transformId : transformsToClean) { @@ -265,7 +266,7 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1)); assertEquals(IndexerState.STARTED, statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState()); - StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id); + StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null); StopDataFrameTransformResponse stopResponse = execute(stopRequest, client::stopDataFrameTransform, client::stopDataFrameTransformAsync); assertTrue(stopResponse.isStopped()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index 4bd78f12ae4..6f7832cbf3c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -76,7 +76,8 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest @After public void cleanUpTransforms() throws IOException { for (String transformId : transformsToClean) { - highLevelClient().dataFrame().stopDataFrameTransform(new StopDataFrameTransformRequest(transformId), RequestOptions.DEFAULT); + highLevelClient().dataFrame().stopDataFrameTransform( + new StopDataFrameTransformRequest(transformId, Boolean.TRUE, TimeValue.timeValueSeconds(20)), RequestOptions.DEFAULT); } for (String transformId : transformsToClean) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java index 6b7de0ab80f..715fa0f5dc7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java @@ -7,25 +7,18 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.TaskOperationFailure; -import org.elasticsearch.action.support.tasks.BaseTasksRequest; -import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.utils.ExceptionsHelper; import java.io.IOException; -import java.util.Collections; -import java.util.List; import java.util.Objects; -public class DeleteDataFrameTransformAction extends Action { +public class DeleteDataFrameTransformAction extends Action { public static final DeleteDataFrameTransformAction INSTANCE = new DeleteDataFrameTransformAction(); public static final String NAME = "cluster:admin/data_frame/delete"; @@ -35,17 +28,21 @@ public class DeleteDataFrameTransformAction extends Action getResponseReader() { - return Response::new; + public Writeable.Reader getResponseReader() { + return in -> { + AcknowledgedResponse response = new AcknowledgedResponse(); + response.readFrom(in); + return response; + }; } - public static class Request extends BaseTasksRequest { - private final String id; + public static class Request extends MasterNodeRequest { + private String id; public Request(String id) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); @@ -60,11 +57,6 @@ public class DeleteDataFrameTransformAction extends Action taskFailures, List nodeFailures) { - super(taskFailures, nodeFailures); - this.acknowledged = acknowledged; - } - - public Response(boolean acknowledged) { - this(acknowledged, Collections.emptyList(), Collections.emptyList()); - } - - public boolean isDeleted() { - return acknowledged; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(acknowledged); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { - toXContentCommon(builder, params); - builder.field("acknowledged", acknowledged); - } - builder.endObject(); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || getClass() != o.getClass()) - return false; - DeleteDataFrameTransformAction.Response response = (DeleteDataFrameTransformAction.Response) o; - return super.equals(o) && acknowledged == response.acknowledged; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), acknowledged); - } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java index ec7e0de9e34..ccf075b13ae 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java @@ -22,9 +22,11 @@ import java.util.concurrent.atomic.AtomicReference; * An abstract class that builds an index incrementally. A background job can be launched using {@link #maybeTriggerAsyncJob(long)}, * it will create the index from the source index up to the last complete bucket that is allowed to be built (based on job position). * Only one background job can run simultaneously and {@link #onFinish} is called when the job - * finishes. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} is called if the indexer is - * aborted while a job is running. The indexer must be started ({@link #start()} to allow a background job to run when - * {@link #maybeTriggerAsyncJob(long)} is called. {@link #stop()} can be used to stop the background job without aborting the indexer. + * finishes. {@link #onStop()} is called after the current search returns when the job is stopped early via a call + * to {@link #stop()}. {@link #onFailure(Exception)} is called if the job fails with an exception and {@link #onAbort()} + * is called if the indexer is aborted while a job is running. The indexer must be started ({@link #start()} + * to allow a background job to run when {@link #maybeTriggerAsyncJob(long)} is called. + * {@link #stop()} can be used to stop the background job without aborting the indexer. * * In a nutshell this is a 2 cycle engine: 1st it sends a query, 2nd it indexes documents based on the response, sends the next query, * indexes, queries, indexes, ... until a condition lets the engine pause until the source provides new input. @@ -84,8 +86,10 @@ public abstract class AsyncTwoPhaseIndexer listener); + /** + * Called when the indexer is stopped. This is only called when the indexer is stopped + * via {@link #stop()} as opposed to {@link #onFinish(ActionListener)} which is called + * when the indexer's work is done. + */ + protected void onStop() { + } + /** * Called when a background job detects that the indexer is aborted causing the * async execution to stop. @@ -276,6 +289,7 @@ public abstract class AsyncTwoPhaseIndexer { - @Override - protected Response createTestInstance() { - return new Response(randomBoolean()); - } - - @Override - protected Reader instanceReader() { - return Response::new; - } -} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index b39c4f1a25a..e56491bdb57 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; import java.io.IOException; import java.util.Collections; @@ -34,17 +35,26 @@ import static org.hamcrest.Matchers.equalTo; public class AsyncTwoPhaseIndexerTests extends ESTestCase { AtomicBoolean isFinished = new AtomicBoolean(false); + AtomicBoolean isStopped = new AtomicBoolean(false); + + @Before + public void reset() { + isFinished.set(false); + isStopped.set(false); + } private class MockIndexer extends AsyncTwoPhaseIndexer { private final CountDownLatch latch; // test the execution order private volatile int step; + private final boolean stoppedBeforeFinished; protected MockIndexer(Executor executor, AtomicReference initialState, Integer initialPosition, - CountDownLatch latch) { + CountDownLatch latch, boolean stoppedBeforeFinished) { super(executor, initialState, initialPosition, new MockJobStats()); this.latch = latch; + this.stoppedBeforeFinished = stoppedBeforeFinished; } @Override @@ -57,7 +67,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { awaitForLatch(); assertThat(step, equalTo(3)); ++step; - return new IterationResult(Collections.emptyList(), 3, true); + return new IterationResult<>(Collections.emptyList(), 3, true); } private void awaitForLatch() { @@ -99,7 +109,8 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { @Override protected void doSaveState(IndexerState state, Integer position, Runnable next) { - assertThat(step, equalTo(5)); + int expectedStep = stoppedBeforeFinished ? 3 : 5; + assertThat(step, equalTo(expectedStep)); ++step; next.run(); } @@ -114,7 +125,12 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { assertThat(step, equalTo(4)); ++step; listener.onResponse(null); - isFinished.set(true); + assertTrue(isFinished.compareAndSet(false, true)); + } + + @Override + protected void onStop() { + assertTrue(isStopped.compareAndSet(false, true)); } @Override @@ -180,7 +196,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { protected void onFailure(Exception exc) { assertThat(step, equalTo(2)); ++step; - isFinished.set(true); + assertTrue(isFinished.compareAndSet(false, true)); } @Override @@ -209,10 +225,9 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { public void testStateMachine() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); - isFinished.set(false); try { CountDownLatch countDownLatch = new CountDownLatch(1); - MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch); + MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch, false); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); @@ -220,7 +235,8 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { countDownLatch.countDown(); assertThat(indexer.getPosition(), equalTo(2)); - ESTestCase.awaitBusy(() -> isFinished.get()); + assertTrue(awaitBusy(() -> isFinished.get())); + assertFalse(isStopped.get()); assertThat(indexer.getStep(), equalTo(6)); assertThat(indexer.getStats().getNumInvocations(), equalTo(1L)); assertThat(indexer.getStats().getNumPages(), equalTo(1L)); @@ -234,18 +250,57 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { public void testStateMachineBrokenSearch() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); - isFinished.set(false); try { MockIndexerThrowsFirstSearch indexer = new MockIndexerThrowsFirstSearch(executor, state, 2); indexer.start(); assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); - assertTrue(ESTestCase.awaitBusy(() -> isFinished.get(), 10000, TimeUnit.SECONDS)); + assertTrue(awaitBusy(() -> isFinished.get(), 10000, TimeUnit.SECONDS)); assertThat(indexer.getStep(), equalTo(3)); } finally { executor.shutdownNow(); } } + + public void testStop_AfterIndexerIsFinished() throws InterruptedException { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + final ExecutorService executor = Executors.newFixedThreadPool(1); + try { + CountDownLatch countDownLatch = new CountDownLatch(1); + MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch, false); + indexer.start(); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + countDownLatch.countDown(); + assertTrue(awaitBusy(() -> isFinished.get())); + + indexer.stop(); + assertTrue(isStopped.get()); + assertThat(indexer.getState(), equalTo(IndexerState.STOPPED)); + } finally { + executor.shutdownNow(); + } + } + + public void testStop_WhileIndexing() throws InterruptedException { + AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); + final ExecutorService executor = Executors.newFixedThreadPool(1); + try { + CountDownLatch countDownLatch = new CountDownLatch(1); + MockIndexer indexer = new MockIndexer(executor, state, 2, countDownLatch, true); + indexer.start(); + assertThat(indexer.getState(), equalTo(IndexerState.STARTED)); + assertTrue(indexer.maybeTriggerAsyncJob(System.currentTimeMillis())); + assertThat(indexer.getState(), equalTo(IndexerState.INDEXING)); + indexer.stop(); + countDownLatch.countDown(); + + assertThat(indexer.getPosition(), equalTo(2)); + assertTrue(awaitBusy(() -> isStopped.get())); + assertFalse(isFinished.get()); + } finally { + executor.shutdownNow(); + } + } } diff --git a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java index 2dd116dfd66..3a6ab2e5b71 100644 --- a/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java +++ b/x-pack/plugin/data-frame/qa/multi-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameIntegTestCase.java @@ -93,11 +93,11 @@ abstract class DataFrameIntegTestCase extends ESIntegTestCase { new StartDataFrameTransformAction.Request(id, false)).actionGet(); } - protected DeleteDataFrameTransformAction.Response deleteDataFrameTransform(String id) { - DeleteDataFrameTransformAction.Response response = client().execute(DeleteDataFrameTransformAction.INSTANCE, + protected AcknowledgedResponse deleteDataFrameTransform(String id) { + AcknowledgedResponse response = client().execute(DeleteDataFrameTransformAction.INSTANCE, new DeleteDataFrameTransformAction.Request(id)) .actionGet(); - if (response.isDeleted()) { + if (response.isAcknowledged()) { transformConfigs.remove(id); } return response; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java index baa1cbf678f..db07e8513cc 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameRestTestCase.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.dataframe.persistence.DataFrameInternalIndex; +import org.junit.After; import org.junit.AfterClass; import java.io.IOException; @@ -278,16 +279,20 @@ public abstract class DataFrameRestTestCase extends ESRestTestCase { adminClient().performRequest(request); } - @AfterClass - public static void removeIndices() throws Exception { + @After + public void waitForDataFrame() throws Exception { wipeDataFrameTransforms(); waitForPendingDataFrameTasks(); + } + + @AfterClass + public static void removeIndices() throws Exception { // we might have disabled wiping indices, but now its time to get rid of them // note: can not use super.cleanUpCluster() as this method must be static wipeIndices(); } - protected static void wipeDataFrameTransforms() throws IOException, InterruptedException { + public void wipeDataFrameTransforms() throws IOException, InterruptedException { List> transformConfigs = getDataFrameTransforms(); for (Map transformConfig : transformConfigs) { String transformId = (String) transformConfig.get("id"); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java index 2cdc4009e78..ac40334dfb4 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java @@ -5,93 +5,73 @@ */ package org.elasticsearch.xpack.dataframe.action; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.tasks.Task; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Request; -import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Response; -import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; -import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; -import java.util.List; +import java.io.IOException; -public class TransportDeleteDataFrameTransformAction extends TransportTasksAction { +public class TransportDeleteDataFrameTransformAction extends TransportMasterNodeAction { private final DataFrameTransformsConfigManager transformsConfigManager; @Inject - public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, - ClusterService clusterService, DataFrameTransformsConfigManager transformsConfigManager) { - super(DeleteDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, - Response::new, ThreadPool.Names.SAME); + public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ThreadPool threadPool, + ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, + DataFrameTransformsConfigManager transformsConfigManager) { + super(DeleteDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, + Request::new, indexNameExpressionResolver); this.transformsConfigManager = transformsConfigManager; } @Override - protected Response newResponse(Request request, List tasks, List taskOperationFailures, - List failedNodeExceptions) { - assert tasks.size() + taskOperationFailures.size() == 1; - boolean cancelled = tasks.size() > 0 && tasks.stream().allMatch(Response::isDeleted); - - return new Response(cancelled, taskOperationFailures, failedNodeExceptions); + protected String executor() { + return ThreadPool.Names.SAME; } @Override - protected void taskOperation(Request request, DataFrameTransformTask task, ActionListener listener) { - assert task.getTransformId().equals(request.getId()); - IndexerState state = task.getState().getIndexerState(); - if (state.equals(IndexerState.STOPPED)) { - task.onCancelled(); - transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap(r -> { - listener.onResponse(new Response(true)); - }, listener::onFailure)); + protected AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + protected AcknowledgedResponse read(StreamInput in) throws IOException { + AcknowledgedResponse response = new AcknowledgedResponse(); + response.readFrom(in); + return response; + } + + @Override + protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { + PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { + listener.onFailure(new ElasticsearchStatusException("Cannot delete data frame [" + request.getId() + + "] as the task is running. Stop the task first", RestStatus.CONFLICT)); } else { - listener.onFailure(new IllegalStateException("Could not delete transform [" + request.getId() + "] because " - + "indexer state is [" + state + "]. Transform must be [" + IndexerState.STOPPED + "] before deletion.")); + // Task is not running, delete the configuration document + transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap( + r -> listener.onResponse(new AcknowledgedResponse(r)), + listener::onFailure)); } } @Override - protected void doExecute(Task task, Request request, ActionListener listener) { - final ClusterState state = clusterService.state(); - final DiscoveryNodes nodes = state.nodes(); - if (nodes.isLocalNodeElectedMaster()) { - PersistentTasksCustomMetaData pTasksMeta = state.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); - if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { - super.doExecute(task, request, listener); - } else { - // we couldn't find the transform in the persistent task CS, but maybe the transform exists in the configuration index, - // if so delete the orphaned document and do not throw (for the normal case we want to stop the task first, - // than delete the configuration document if and only if the data frame transform is in stopped state) - transformsConfigManager.deleteTransform(request.getId(), ActionListener.wrap(r -> { - listener.onResponse(new Response(true)); - return; - }, listener::onFailure)); - } - } else { - // Delegates DeleteTransform to elected master node, so it becomes the coordinating node. - // Non-master nodes may have a stale cluster state that shows transforms which are cancelled - // on the master, which makes testing difficult. - if (nodes.getMasterNode() == null) { - listener.onFailure(new MasterNotDiscoveredException("no known master nodes")); - } else { - transportService.sendRequest(nodes.getMasterNode(), actionName, request, - new ActionListenerResponseHandler<>(listener, Response::new)); - } - } + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java index 7ab5f280014..bb01da4c7e5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportGetDataFrameTransformsStatsAction.java @@ -132,7 +132,6 @@ public class TransportGetDataFrameTransformsStatsAction extends }, e -> { // If the index to search, or the individual config is not there, just return empty - logger.error("failed to expand ids", e); if (e instanceof ResourceNotFoundException) { finalListener.onResponse(new Response(Collections.emptyList())); } else { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java index 0b8ef692cdd..997739b2407 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPutDataFrameTransformAction.java @@ -6,8 +6,6 @@ package org.elasticsearch.xpack.dataframe.action; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.ActionListener; @@ -63,8 +61,6 @@ import java.util.stream.Collectors; public class TransportPutDataFrameTransformAction extends TransportMasterNodeAction { - private static final Logger logger = LogManager.getLogger(TransportPutDataFrameTransformAction.class); - private final XPackLicenseState licenseState; private final Client client; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java index 120f1ef7759..840f94dbd21 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStopDataFrameTransformAction.java @@ -5,64 +5,85 @@ */ package org.elasticsearch.xpack.dataframe.action; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.action.util.PageParams; -import org.elasticsearch.xpack.core.dataframe.DataFrameMessages; import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformTaskState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; +import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; -import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; - public class TransportStopDataFrameTransformAction extends TransportTasksAction { - private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); private final ThreadPool threadPool; private final DataFrameTransformsConfigManager dataFrameTransformsConfigManager; + private final PersistentTasksService persistentTasksService; @Inject public TransportStopDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService, ThreadPool threadPool, + PersistentTasksService persistentTasksService, DataFrameTransformsConfigManager dataFrameTransformsConfigManager) { super(StopDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, StopDataFrameTransformAction.Request::new, StopDataFrameTransformAction.Response::new, StopDataFrameTransformAction.Response::new, ThreadPool.Names.SAME); this.threadPool = threadPool; this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; + this.persistentTasksService = persistentTasksService; } @Override protected void doExecute(Task task, StopDataFrameTransformAction.Request request, ActionListener listener) { + final ClusterState state = clusterService.state(); + final DiscoveryNodes nodes = state.nodes(); + if (nodes.isLocalNodeElectedMaster() == false) { + // Delegates stop data frame to elected master node so it becomes the coordinating node. + if (nodes.getMasterNode() == null) { + listener.onFailure(new MasterNotDiscoveredException("no known master node")); + } else { + transportService.sendRequest(nodes.getMasterNode(), actionName, request, + new ActionListenerResponseHandler<>(listener, StopDataFrameTransformAction.Response::new)); + } + } else { + final ActionListener finalListener; + if (request.waitForCompletion()) { + finalListener = waitForStopListener(request, listener); + } else { + finalListener = listener; + } - dataFrameTransformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), ActionListener.wrap( - expandedIds -> { - request.setExpandedIds(new HashSet<>(expandedIds)); - request.setNodes(DataFrameNodes.dataFrameTaskNodes(expandedIds, clusterService.state())); - super.doExecute(task, request, listener); - }, - listener::onFailure - )); + dataFrameTransformsConfigManager.expandTransformIds(request.getId(), new PageParams(0, 10_000), ActionListener.wrap( + expandedIds -> { + request.setExpandedIds(new HashSet<>(expandedIds)); + request.setNodes(DataFrameNodes.dataFrameTaskNodes(expandedIds, clusterService.state())); + super.doExecute(task, request, finalListener); + }, + listener::onFailure + )); + } } @Override @@ -84,42 +105,9 @@ public class TransportStopDataFrameTransformAction extends RestStatus.CONFLICT)); return; } - if (request.waitForCompletion() == false) { - transformTask.stop(listener); - } else { - ActionListener blockingListener = ActionListener.wrap(response -> { - if (response.isStopped()) { - // The Task acknowledged that it is stopped/stopping... wait until the status actually - // changes over before returning. Switch over to Generic threadpool so - // we don't block the network thread - threadPool.generic().execute(() -> { - try { - long untilInNanos = System.nanoTime() + request.getTimeout().getNanos(); - while (System.nanoTime() - untilInNanos < 0) { - if (transformTask.isStopped()) { - listener.onResponse(response); - return; - } - Thread.sleep(WAIT_FOR_COMPLETION_POLL.millis()); - } - // ran out of time - listener.onFailure(new ElasticsearchTimeoutException( - DataFrameMessages.getMessage(DataFrameMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_TIMEOUT, - request.getTimeout().getStringRep(), request.getId()))); - } catch (InterruptedException e) { - listener.onFailure(new ElasticsearchException(DataFrameMessages.getMessage( - DataFrameMessages.REST_STOP_TRANSFORM_WAIT_FOR_COMPLETION_INTERRUPT, request.getId()), e)); - } - }); - } else { - // Did not acknowledge stop, just return the response - listener.onResponse(response); - } - }, listener::onFailure); - - transformTask.stop(blockingListener); - } + transformTask.stop(); + listener.onResponse(new StopDataFrameTransformAction.Response(Boolean.TRUE)); } else { listener.onFailure(new RuntimeException("ID of data frame indexer task [" + transformTask.getTransformId() + "] does not match request's ID [" + request.getId() + "]")); @@ -139,4 +127,47 @@ public class TransportStopDataFrameTransformAction extends boolean allStopped = tasks.stream().allMatch(StopDataFrameTransformAction.Response::isStopped); return new StopDataFrameTransformAction.Response(allStopped); } + + private ActionListener + waitForStopListener(StopDataFrameTransformAction.Request request, + ActionListener listener) { + + return ActionListener.wrap( + response -> { + // Wait until the persistent task is stopped + // Switch over to Generic threadpool so we don't block the network thread + threadPool.generic().execute(() -> + waitForDataFrameStopped(request.getExpandedIds(), request.getTimeout(), listener)); + }, + listener::onFailure + ); + } + + private void waitForDataFrameStopped(Collection persistentTaskIds, TimeValue timeout, + ActionListener listener) { + persistentTasksService.waitForPersistentTasksCondition(persistentTasksCustomMetaData -> { + + if (persistentTasksCustomMetaData == null) { + return true; + } + + for (String persistentTaskId : persistentTaskIds) { + if (persistentTasksCustomMetaData.getTask(persistentTaskId) != null) { + return false; + } + } + return true; + + }, timeout, new ActionListener() { + @Override + public void onResponse(Boolean result) { + listener.onResponse(new StopDataFrameTransformAction.Response(Boolean.TRUE)); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java index 183952e0603..125e61b5021 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/rest/action/RestDeleteDataFrameTransformAction.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; @@ -33,7 +34,7 @@ public class RestDeleteDataFrameTransformAction extends BaseRestHandler { DeleteDataFrameTransformAction.Request request = new DeleteDataFrameTransformAction.Request(id); return channel -> client.execute(DeleteDataFrameTransformAction.INSTANCE, request, - new BaseTasksResponseToXContentListener<>(channel)); + new RestToXContentListener<>(channel)); } @Override diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index 2f4945cbeec..bfe0e4f4d77 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction; import org.elasticsearch.xpack.core.dataframe.action.StartDataFrameTransformTaskAction.Response; -import org.elasticsearch.xpack.core.dataframe.action.StopDataFrameTransformAction; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransform; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; @@ -86,7 +85,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S String initialReason = null; long initialGeneration = 0; Map initialPosition = null; - logger.info("[{}] init, got state: [{}]", transform.getId(), state != null); + logger.trace("[{}] init, got state: [{}]", transform.getId(), state != null); if (state != null) { initialTaskState = state.getTaskState(); initialReason = state.getReason(); @@ -219,51 +218,17 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S )); } - public synchronized void stop(ActionListener listener) { + public synchronized void stop() { if (getIndexer() == null) { - listener.onFailure(new ElasticsearchException("Task for transform [{}] not fully initialized. Try again later", - getTransformId())); return; } // taskState is initialized as STOPPED and is updated in tandem with the indexerState // Consequently, if it is STOPPED, we consider the whole task STOPPED. if (taskState.get() == DataFrameTransformTaskState.STOPPED) { - listener.onResponse(new StopDataFrameTransformAction.Response(true)); return; } - final IndexerState newState = getIndexer().stop(); - switch (newState) { - case STOPPED: - // Fall through to `STOPPING` as the behavior is the same for both, we should persist for both - case STOPPING: - // update the persistent state to STOPPED. There are two scenarios and both are safe: - // 1. we persist STOPPED now, indexer continues a bit then sees the flag and checkpoints another STOPPED with the more recent - // position. - // 2. we persist STOPPED now, indexer continues a bit but then dies. When/if we resume we'll pick up at last checkpoint, - // overwrite some docs and eventually checkpoint. - taskState.set(DataFrameTransformTaskState.STOPPED); - DataFrameTransformState state = new DataFrameTransformState( - DataFrameTransformTaskState.STOPPED, - IndexerState.STOPPED, - getIndexer().getPosition(), - currentCheckpoint.get(), - stateReason.get(), - getIndexer().getProgress()); - persistStateToClusterState(state, ActionListener.wrap( - task -> { - auditor.info(transform.getId(), "Updated state to [" + state.getTaskState() + "]"); - listener.onResponse(new StopDataFrameTransformAction.Response(true)); - }, - exc -> listener.onFailure(new ElasticsearchException( - "Error while updating state for data frame transform [{}] to [{}]", exc, - transform.getId(), - state.getIndexerState())))); - break; - default: - listener.onFailure(new ElasticsearchException("Cannot stop task for data frame transform [{}], because state was [{}]", - transform.getId(), newState)); - break; - } + + getIndexer().stop(); } @Override @@ -281,12 +246,10 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S /** * Attempt to gracefully cleanup the data frame transform so it can be terminated. - * This tries to remove the job from the scheduler, and potentially any other - * cleanup operations in the future + * This tries to remove the job from the scheduler and completes the persistent task */ synchronized void shutdown() { try { - logger.info("Data frame indexer [" + transform.getId() + "] received abort request, stopping indexer."); schedulerEngine.remove(SCHEDULE_NAME + "_" + transform.getId()); schedulerEngine.unregister(this); } catch (Exception e) { @@ -613,6 +576,13 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } } + @Override + protected void onStop() { + auditor.info(transformConfig.getId(), "Indexer has stopped"); + logger.info("Data frame transform [{}] indexer has stopped", transformConfig.getId()); + transformTask.shutdown(); + } + @Override protected void onAbort() { auditor.info(transformConfig.getId(), "Received abort request, stopping indexer"); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index f1ac07b7234..1e9223b79f2 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -106,6 +106,7 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-stop" + wait_for_completion: true - match: { stopped: true } - do: @@ -199,6 +200,7 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "airline-transform-start-later" + wait_for_completion: true - match: { stopped: true } - do: @@ -232,6 +234,8 @@ teardown: - do: data_frame.stop_data_frame_transform: transform_id: "_all" + wait_for_completion: true + - match: { stopped: true } - do: From ffefc66260ae3791f4fb561ee27e91cf964ea8a6 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Fri, 10 May 2019 16:28:28 +0100 Subject: [PATCH 16/25] Mute failing AsyncTwoPhaseIndexerTests See https://github.com/elastic/elasticsearch/issues/42084 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index e56491bdb57..27fba82338a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -222,6 +222,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { } } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStateMachine() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -264,6 +265,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { } } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStop_AfterIndexerIsFinished() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); @@ -283,6 +285,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { } } + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStop_WhileIndexing() throws InterruptedException { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); From 0449869511dc75c399a3e60a581ae3d93cf77b74 Mon Sep 17 00:00:00 2001 From: jimczi Date: Tue, 21 May 2019 12:28:57 +0200 Subject: [PATCH 17/25] Fix unchecked warning in RollupIndexerIndexingTests#testSimpleDateHistoWithOverlappingDelay --- .../xpack/rollup/job/RollupIndexerIndexingTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index f2797413c80..1641ac6b7f7 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -332,7 +332,7 @@ public class RollupIndexerIndexingTests extends AggregatorTestCase { String rollupIndex = randomAlphaOfLengthBetween(5, 10); String field = "the_histo"; DateHistogramGroupConfig dateHistoConfig = - new DateHistogramGroupConfig(field, new DateHistogramInterval("1h"), new DateHistogramInterval("15m"), null); + new FixedInterval(field, new DateHistogramInterval("1h"), new DateHistogramInterval("15m"), null); RollupJobConfig job = createJob(rollupIndex, new GroupConfig(dateHistoConfig), Collections.emptyList()); final List> dataset = new ArrayList<>(); long now = asLong("2015-04-01T10:30:00.000Z"); From 216c74d10aebc0566b8de15088ff660f33af38d5 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Tue, 21 May 2019 06:36:38 -0400 Subject: [PATCH 18/25] Add experimental and warnings to vector functions (#42205) --- docs/reference/query-dsl/script-score-query.asciidoc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index 1a68ce83749..5fe723c73d7 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -74,10 +74,18 @@ to be the most efficient by using the internal mechanisms. [[vector-functions]] ===== Functions for vector fields + +experimental[] + These functions are used for for <> and <> fields. +NOTE: During vector functions' calculation, all matched documents are +linearly scanned. Thus, expect the query time grow linearly +with the number of matched documents. For this reason, we recommend +to limit the number of matched documents with a `query` parameter. + For dense_vector fields, `cosineSimilarity` calculates the measure of cosine similarity between a given query vector and document vectors. From de096485c8c1878aaf22405d6d619216402a584a Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Tue, 21 May 2019 13:49:42 +0300 Subject: [PATCH 19/25] Use spearate testkit dir for each run (#42013) Gradle Testkit reuses the teskit dir by default between tests. With this change we use a temporary one for each run hoping it will fix #41431 --- ...portElasticsearchBuildResourcesTaskIT.java | 34 +++++++------------ .../gradle/precommit/JarHellTaskIT.java | 5 +-- .../test/GradleIntegrationTestCase.java | 16 ++++++++- .../testclusters/TestClustersPluginIT.java | 14 +++++--- 4 files changed, 38 insertions(+), 31 deletions(-) diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java index 99afd0bcbe0..7968f4f57cf 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/ExportElasticsearchBuildResourcesTaskIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.gradle; import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTestCase { @@ -29,25 +28,19 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe public static final String PROJECT_NAME = "elasticsearch-build-resources"; public void testUpToDateWithSourcesConfigured() { - GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + getGradleRunner(PROJECT_NAME) .withArguments("clean", "-s") - .withPluginClasspath() .build(); - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("buildResources", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle_suppressions.xml"); - result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + result = getGradleRunner(PROJECT_NAME) .withArguments("buildResources", "-s", "-i") - .withPluginClasspath() .build(); assertTaskUpToDate(result, ":buildResources"); assertBuildFileExists(result, PROJECT_NAME, "build-tools-exported/checkstyle.xml"); @@ -55,10 +48,8 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe } public void testImplicitTaskDependencyCopy() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("clean", "sampleCopyAll", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":buildResources"); @@ -69,10 +60,8 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe } public void testImplicitTaskDependencyInputFileOfOther() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) + BuildResult result = getGradleRunner(PROJECT_NAME) .withArguments("clean", "sample", "-s", "-i") - .withPluginClasspath() .build(); assertTaskSuccessful(result, ":sample"); @@ -81,11 +70,12 @@ public class ExportElasticsearchBuildResourcesTaskIT extends GradleIntegrationTe } public void testIncorrectUsage() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir(PROJECT_NAME)) - .withArguments("noConfigAfterExecution", "-s", "-i") - .withPluginClasspath() - .buildAndFail(); - assertOutputContains("buildResources can't be configured after the task ran"); + assertOutputContains( + getGradleRunner(PROJECT_NAME) + .withArguments("noConfigAfterExecution", "-s", "-i") + .buildAndFail() + .getOutput(), + "buildResources can't be configured after the task ran" + ); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java index e5624a15d92..d45028d8445 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/JarHellTaskIT.java @@ -2,7 +2,6 @@ package org.elasticsearch.gradle.precommit; import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; /* * Licensed to Elasticsearch under one or more contributor @@ -25,10 +24,8 @@ import org.gradle.testkit.runner.GradleRunner; public class JarHellTaskIT extends GradleIntegrationTestCase { public void testJarHellDetected() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("jarHell")) + BuildResult result = getGradleRunner("jarHell") .withArguments("clean", "precommit", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) - .withPluginClasspath() .buildAndFail(); assertTaskFailed(result, ":jarHell"); diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f7a0382cec7..46a9194780c 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -4,8 +4,12 @@ import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.BuildTask; import org.gradle.testkit.runner.GradleRunner; import org.gradle.testkit.runner.TaskOutcome; +import org.junit.Rule; +import org.junit.rules.TemporaryFolder; import java.io.File; +import java.io.IOException; +import java.io.UncheckedIOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.List; @@ -16,6 +20,9 @@ import java.util.stream.Stream; public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { + @Rule + public TemporaryFolder testkitTmpDir = new TemporaryFolder(); + protected File getProjectDir(String name) { File root = new File("src/testKit/"); if (root.exists() == false) { @@ -26,9 +33,16 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { } protected GradleRunner getGradleRunner(String sampleProject) { + File testkit; + try { + testkit = testkitTmpDir.newFolder(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } return GradleRunner.create() .withProjectDir(getProjectDir(sampleProject)) - .withPluginClasspath(); + .withPluginClasspath() + .withTestKitDir(testkit); } protected File getBuildDir(String name) { diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java index 84b13340c35..c9086d1459a 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java @@ -21,12 +21,21 @@ package org.elasticsearch.gradle.testclusters; import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; import org.gradle.testkit.runner.GradleRunner; +import org.junit.Before; import org.junit.Ignore; + import java.util.Arrays; public class TestClustersPluginIT extends GradleIntegrationTestCase { + private GradleRunner runner; + + @Before + public void setUp() throws Exception { + runner = getGradleRunner("testclusters"); + } + public void testListClusters() { BuildResult result = getTestClustersRunner("listTestClusters").build(); @@ -190,10 +199,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase { arguments[tasks.length] = "-s"; arguments[tasks.length + 1] = "-i"; arguments[tasks.length + 2] = "-Dlocal.repo.path=" + getLocalTestRepoPath(); - return GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments(arguments) - .withPluginClasspath(); + return runner.withArguments(arguments); } private void assertStartedAndStoppedOnce(BuildResult result, String nodeName) { From a6204a5eafa6460d9043a9b0b6f934048527138c Mon Sep 17 00:00:00 2001 From: Glen Smith Date: Tue, 21 May 2019 14:59:40 +0200 Subject: [PATCH 20/25] Remove stray back tick that's messing up table format (#41705) --- docs/reference/cat/thread_pool.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index d1ea1fad885..03854fae2f6 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -59,7 +59,7 @@ ml_autodetect (default distro only) ml_datafeed (default distro only) ml_utility (default distro only) refresh -rollup_indexing (default distro only)` +rollup_indexing (default distro only) search security-token-key (default distro only) snapshot From cef4b9ba76ff297efbebbf48483b0ad7b6e07a02 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Tue, 21 May 2019 16:37:11 +0300 Subject: [PATCH 21/25] Move the FIPS configuration back to the build plugin (#41989) * Move the FIPS configuration back to the build plugin This is necesary for external users of build-tools. Closes #41721 --- build.gradle | 15 --------------- .../org/elasticsearch/gradle/BuildPlugin.groovy | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/build.gradle b/build.gradle index c28a1896912..9b4aa3ed9c7 100644 --- a/build.gradle +++ b/build.gradle @@ -619,21 +619,6 @@ allprojects { } } -subprojects { - // Common config when running with a FIPS-140 runtime JVM - if (project.ext.has("inFipsJvm") && project.ext.inFipsJvm) { - tasks.withType(Test) { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - project.pluginManager.withPlugin("elasticsearch.testclusters") { - project.testClusters.all { - systemProperty 'javax.net.ssl.trustStorePassword', 'password' - systemProperty 'javax.net.ssl.keyStorePassword', 'password' - } - } - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 0357c2f76ef..d02d9d417c9 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -116,6 +116,22 @@ class BuildPlugin implements Plugin { configureTestTasks(project) configurePrecommit(project) configureDependenciesInfo(project) + + // Common config when running with a FIPS-140 runtime JVM + // Need to do it here to support external plugins + if (project.ext.inFipsJvm) { + project.tasks.withType(Test) { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + project.pluginManager.withPlugin("elasticsearch.testclusters") { + project.testClusters.all { + systemProperty 'javax.net.ssl.trustStorePassword', 'password' + systemProperty 'javax.net.ssl.keyStorePassword', 'password' + } + } + } + } From 75425ae167d1f2c2ac489f0f389e62d2d37de4a5 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Tue, 21 May 2019 15:51:55 +0200 Subject: [PATCH 22/25] Remove 7.0.2 (#42282) 7.0.2 removed, since it will never be, fixing branch consistency check. --- server/src/main/java/org/elasticsearch/Version.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index b0bfe9a44b3..8945a0f6138 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -136,8 +136,6 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_0_1_ID = 7000199; public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final int V_7_0_2_ID = 7000299; - public static final Version V_7_0_2 = new Version(V_7_0_2_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_0_ID = 7010099; public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); public static final int V_7_1_1_ID = 7010199; @@ -163,8 +161,6 @@ public class Version implements Comparable, ToXContentFragment { return V_7_1_1; case V_7_1_0_ID: return V_7_1_0; - case V_7_0_2_ID: - return V_7_0_2; case V_7_0_1_ID: return V_7_0_1; case V_7_0_0_ID: From 0fd42ce1f5944b094fbddfdef05a27cd3e6c1b60 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 21 May 2019 15:48:45 +0100 Subject: [PATCH 23/25] [ML Data Frame] Start directly data frame rather than via the scheduler (#42224) Trigger indexer start directly to put the indexer in INDEXING state immediately --- .../client/DataFrameTransformIT.java | 4 ++- ...FrameTransformPersistentTasksExecutor.java | 24 ++++--------- .../transforms/DataFrameTransformTask.java | 35 ++++++++++++++----- .../test/data_frame/transforms_start_stop.yml | 8 ++--- .../test/data_frame/transforms_stats.yml | 12 +++---- 5 files changed, 46 insertions(+), 37 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 1bd49154ee5..40cd6f454cd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -72,6 +72,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.oneOf; public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { @@ -264,7 +265,8 @@ public class DataFrameTransformIT extends ESRestHighLevelClientTestCase { GetDataFrameTransformStatsResponse statsResponse = execute(new GetDataFrameTransformStatsRequest(id), client::getDataFrameTransformStats, client::getDataFrameTransformStatsAsync); assertThat(statsResponse.getTransformsStateAndStats(), hasSize(1)); - assertEquals(IndexerState.STARTED, statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState()); + IndexerState indexerState = statsResponse.getTransformsStateAndStats().get(0).getTransformState().getIndexerState(); + assertThat(indexerState, is(oneOf(IndexerState.STARTED, IndexerState.INDEXING))); StopDataFrameTransformRequest stopRequest = new StopDataFrameTransformRequest(id, Boolean.TRUE, null); StopDataFrameTransformResponse stopResponse = diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java index d0f15197c3c..5b0c0e7dfc1 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformPersistentTasksExecutor.java @@ -106,8 +106,6 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx protected void nodeOperation(AllocatedPersistentTask task, @Nullable DataFrameTransform params, PersistentTaskState state) { final String transformId = params.getId(); final DataFrameTransformTask buildTask = (DataFrameTransformTask) task; - final SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(DataFrameTransformTask.SCHEDULE_NAME + "_" + transformId, - next()); final DataFrameTransformState transformState = (DataFrameTransformState) state; final DataFrameTransformTask.ClientDataFrameIndexerBuilder indexerBuilder = @@ -137,7 +135,7 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx stats -> { indexerBuilder.setInitialStats(stats); buildTask.initializeIndexer(indexerBuilder); - scheduleAndStartTask(buildTask, schedulerJob, startTaskListener); + startTask(buildTask, startTaskListener); }, error -> { if (error instanceof ResourceNotFoundException == false) { @@ -145,7 +143,7 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx } indexerBuilder.setInitialStats(new DataFrameIndexerTransformStats(transformId)); buildTask.initializeIndexer(indexerBuilder); - scheduleAndStartTask(buildTask, schedulerJob, startTaskListener); + startTask(buildTask, startTaskListener); } ); @@ -218,30 +216,20 @@ public class DataFrameTransformPersistentTasksExecutor extends PersistentTasksEx } } - private void scheduleAndStartTask(DataFrameTransformTask buildTask, - SchedulerEngine.Job schedulerJob, - ActionListener listener) { - // Note that while the task is added to the scheduler here, the internal state will prevent - // it from doing any work until the task is "started" via the StartTransform api - schedulerEngine.register(buildTask); - schedulerEngine.add(schedulerJob); - logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); + private void startTask(DataFrameTransformTask buildTask, + ActionListener listener) { // If we are stopped, and it is an initial run, this means we have never been started, // attempt to start the task if (buildTask.getState().getTaskState().equals(DataFrameTransformTaskState.STOPPED) && buildTask.isInitialRun()) { + logger.info("Data frame transform [{}] created.", buildTask.getTransformId()); buildTask.start(listener); + } else { logger.debug("No need to start task. Its current state is: {}", buildTask.getState().getIndexerState()); listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); } } - static SchedulerEngine.Schedule next() { - return (startTime, now) -> { - return now + 1000; // to be fixed, hardcode something - }; - } - @Override protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId, PersistentTasksCustomMetaData.PersistentTask persistentTask, Map headers) { diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java index bfe0e4f4d77..ee8767e2235 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameTransformTask.java @@ -208,6 +208,10 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S persistStateToClusterState(state, ActionListener.wrap( task -> { auditor.info(transform.getId(), "Updated state to [" + state.getTaskState() + "]"); + long now = System.currentTimeMillis(); + // kick off the indexer + triggered(new Event(schedulerJobName(), now, now)); + registerWithSchedulerJob(); listener.onResponse(new StartDataFrameTransformTaskAction.Response(true)); }, exc -> { @@ -238,7 +242,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S return; } // for now no rerun, so only trigger if checkpoint == 0 - if (currentCheckpoint.get() == 0 && event.getJobName().equals(SCHEDULE_NAME + "_" + transform.getId())) { + if (currentCheckpoint.get() == 0 && event.getJobName().equals(schedulerJobName())) { logger.debug("Data frame indexer [{}] schedule has triggered, state: [{}]", event.getJobName(), getIndexer().getState()); getIndexer().maybeTriggerAsyncJob(System.currentTimeMillis()); } @@ -249,13 +253,7 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S * This tries to remove the job from the scheduler and completes the persistent task */ synchronized void shutdown() { - try { - schedulerEngine.remove(SCHEDULE_NAME + "_" + transform.getId()); - schedulerEngine.unregister(this); - } catch (Exception e) { - markAsFailed(e); - return; - } + deregisterSchedulerJob(); markAsCompleted(); } @@ -311,6 +309,27 @@ public class DataFrameTransformTask extends AllocatedPersistentTask implements S } } + private void registerWithSchedulerJob() { + schedulerEngine.register(this); + final SchedulerEngine.Job schedulerJob = new SchedulerEngine.Job(schedulerJobName(), next()); + schedulerEngine.add(schedulerJob); + } + + private void deregisterSchedulerJob() { + schedulerEngine.remove(schedulerJobName()); + schedulerEngine.unregister(this); + } + + private String schedulerJobName() { + return DataFrameTransformTask.SCHEDULE_NAME + "_" + getTransformId(); + } + + private SchedulerEngine.Schedule next() { + return (startTime, now) -> { + return now + 1000; // to be fixed, hardcode something + }; + } + synchronized void initializeIndexer(ClientDataFrameIndexerBuilder indexerBuilder) { indexer.set(indexerBuilder.build(this)); } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml index 1e9223b79f2..8b30fd1186b 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_start_stop.yml @@ -100,7 +100,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - do: @@ -127,7 +127,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } --- @@ -168,7 +168,7 @@ teardown: transform_id: "airline-transform-start-stop" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-stop" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - do: @@ -194,7 +194,7 @@ teardown: transform_id: "airline-transform-start-later" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-start-later" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - do: diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml index 33b0f40863a..93c942f0733 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/transforms_stats.yml @@ -47,18 +47,18 @@ teardown: transform_id: "airline-transform-stats" - match: { count: 1 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.0.state.task_state: "started" } - match: { transforms.0.state.checkpoint: 0 } - - match: { transforms.0.stats.pages_processed: 0 } + - lte: { transforms.0.stats.pages_processed: 1 } - match: { transforms.0.stats.documents_processed: 0 } - match: { transforms.0.stats.documents_indexed: 0 } - - match: { transforms.0.stats.trigger_count: 0 } + - match: { transforms.0.stats.trigger_count: 1 } - match: { transforms.0.stats.index_time_in_ms: 0 } - match: { transforms.0.stats.index_total: 0 } - match: { transforms.0.stats.index_failures: 0 } - - match: { transforms.0.stats.search_time_in_ms: 0 } - - match: { transforms.0.stats.search_total: 0 } + - gte: { transforms.0.stats.search_time_in_ms: 0 } + - lte: { transforms.0.stats.search_total: 1 } - match: { transforms.0.stats.search_failures: 0 } --- @@ -172,7 +172,7 @@ teardown: transform_id: "_all" - match: { count: 2 } - match: { transforms.0.id: "airline-transform-stats" } - - match: { transforms.0.state.indexer_state: "started" } + - match: { transforms.0.state.indexer_state: "/started|indexing/" } - match: { transforms.1.id: "airline-transform-stats-dos" } - match: { transforms.1.state.indexer_state: "stopped" } From a4e6fb4dd2c2ec24638f675a5901b28809c4e6e7 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Tue, 21 May 2019 18:03:24 +0300 Subject: [PATCH 24/25] [ML] Fix logger declaration in ML plugins (#42222) (#42238) This corrects what appears to have been a copy-paste error where the logger for `MachineLearning` and `DataFrame` was wrongly set to be that of `XPackPlugin`. --- .../java/org/elasticsearch/xpack/dataframe/DataFrame.java | 8 +------- .../java/org/elasticsearch/xpack/ml/MachineLearning.java | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java index b7e6c235f8e..34343e5fe88 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/DataFrame.java @@ -76,10 +76,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.function.Supplier; import java.util.function.UnaryOperator; @@ -90,11 +88,7 @@ public class DataFrame extends Plugin implements ActionPlugin, PersistentTaskPlu public static final String NAME = "data_frame"; public static final String TASK_THREAD_POOL_NAME = "data_frame_indexing"; - // list of headers that will be stored when a transform is created - public static final Set HEADER_FILTERS = new HashSet<>( - Arrays.asList("es-security-runas-user", "_xpack_security_authentication")); - - private static final Logger logger = LogManager.getLogger(XPackPlugin.class); + private static final Logger logger = LogManager.getLogger(DataFrame.class); private final boolean enabled; private final Settings settings; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 67acc1d0d67..861eea809ca 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -300,7 +300,7 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu public static final Setting MIN_DISK_SPACE_OFF_HEAP = Setting.byteSizeSetting("xpack.ml.min_disk_space_off_heap", new ByteSizeValue(5, ByteSizeUnit.GB), Setting.Property.NodeScope); - private static final Logger logger = LogManager.getLogger(XPackPlugin.class); + private static final Logger logger = LogManager.getLogger(MachineLearning.class); private final Settings settings; private final Environment env; From ecd033bea62489e70c60763cfc117d0b96534fa0 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 21 May 2019 17:20:52 +0200 Subject: [PATCH 25/25] Cleanup Various Uses of ActionListener (#40126) (#42274) * Cleanup Various Uses of ActionListener * Use shorter `map`, `runAfter` or `wrap` where functionally equivalent to anonymous class * Use ActionRunnable where functionally equivalent --- .../tasks/get/TransportGetTaskAction.java | 28 +++----- .../TransportSnapshotsStatusAction.java | 22 ++----- .../upgrade/post/TransportUpgradeAction.java | 25 ++----- .../action/bulk/BulkRequestHandler.java | 21 ++---- .../ingest/PutPipelineTransportAction.java | 25 ++----- .../support/ThreadedActionListener.java | 10 +-- .../broadcast/TransportBroadcastAction.java | 42 ++++-------- ...ransportInstanceSingleOperationAction.java | 29 +++----- .../shard/TransportSingleShardAction.java | 9 +-- .../support/tasks/TransportTasksAction.java | 18 +---- .../elasticsearch/search/SearchService.java | 48 ++------------ .../transport/TransportKeepAlive.java | 5 +- .../action/RejectionActionIT.java | 6 +- .../node/tasks/TransportTasksActionTests.java | 12 +--- .../search/ClearScrollControllerTests.java | 66 ++++++------------- .../TransportActionFilterChainTests.java | 11 ++-- .../TransportWriteActionTests.java | 12 +--- .../decider/EnableAssignmentDeciderIT.java | 14 +--- 18 files changed, 96 insertions(+), 307 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index fe07a4efe93..d1d72da5445 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -203,27 +203,15 @@ public class TransportGetTaskAction extends HandledTransportAction() { - @Override - public void onResponse(GetResponse getResponse) { - try { - onGetFinishedTaskFromIndex(getResponse, listener); - } catch (Exception e) { - listener.onFailure(e); - } + client.get(get, ActionListener.wrap(r -> onGetFinishedTaskFromIndex(r, listener), e -> { + if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { + // We haven't yet created the index for the task results so it can't be found. + listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e, + request.getTaskId())); + } else { + listener.onFailure(e); } - - @Override - public void onFailure(Exception e) { - if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) { - // We haven't yet created the index for the task results so it can't be found. - listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e, - request.getTaskId())); - } else { - listener.onFailure(e); - } - } - }); + })); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 5dfc24d1e28..c2f0d3dd0c0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -119,23 +119,11 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction() { - @Override - public void onResponse(TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) { - try { - List currentSnapshots = - snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())); - listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses)); - } catch (Exception e) { - listener.onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + transportNodesSnapshotsStatus.execute(nodesRequest, + ActionListener.map( + listener, nodeSnapshotStatuses -> + buildResponse(request, snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots())), + nodeSnapshotStatuses))); } else { // We don't have any in-progress shards, just return current stats listener.onResponse(buildResponse(request, currentSnapshots, null)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java index f2d046f3321..b122350c3e6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/upgrade/post/TransportUpgradeAction.java @@ -184,26 +184,13 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction listener) { - ActionListener settingsUpdateListener = new ActionListener() { - @Override - public void onResponse(UpgradeResponse upgradeResponse) { - try { - if (upgradeResponse.versions().isEmpty()) { - listener.onResponse(upgradeResponse); - } else { - updateSettings(upgradeResponse, listener); - } - } catch (Exception e) { - listener.onFailure(e); - } + super.doExecute(task, request, ActionListener.wrap(upgradeResponse -> { + if (upgradeResponse.versions().isEmpty()) { + listener.onResponse(upgradeResponse); + } else { + updateSettings(upgradeResponse, listener); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }; - super.doExecute(task, request, settingsUpdateListener); + }, listener::onFailure)); } private void updateSettings(final UpgradeResponse upgradeResponse, final ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java index 2f5db520088..7890fb4e83f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -59,27 +59,20 @@ public final class BulkRequestHandler { semaphore.acquire(); toRelease = semaphore::release; CountDownLatch latch = new CountDownLatch(1); - retry.withBackoff(consumer, bulkRequest, new ActionListener() { + retry.withBackoff(consumer, bulkRequest, ActionListener.runAfter(new ActionListener() { @Override public void onResponse(BulkResponse response) { - try { - listener.afterBulk(executionId, bulkRequest, response); - } finally { - semaphore.release(); - latch.countDown(); - } + listener.afterBulk(executionId, bulkRequest, response); } @Override public void onFailure(Exception e) { - try { - listener.afterBulk(executionId, bulkRequest, e); - } finally { - semaphore.release(); - latch.countDown(); - } + listener.afterBulk(executionId, bulkRequest, e); } - }); + }, () -> { + semaphore.release(); + latch.countDown(); + })); bulkRequestSetupSuccessful = true; if (concurrentRequests == 0) { latch.await(); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java index 97f13bf71d1..be1528a354b 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/PutPipelineTransportAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -74,25 +73,13 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction() { - @Override - public void onResponse(NodesInfoResponse nodeInfos) { - try { - Map ingestInfos = new HashMap<>(); - for (NodeInfo nodeInfo : nodeInfos.getNodes()) { - ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); - } - ingestService.putPipeline(ingestInfos, request, listener); - } catch (Exception e) { - onFailure(e); - } + client.admin().cluster().nodesInfo(nodesInfoRequest, ActionListener.wrap(nodeInfos -> { + Map ingestInfos = new HashMap<>(); + for (NodeInfo nodeInfo : nodeInfos.getNodes()) { + ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest()); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + ingestService.putPipeline(ingestInfos, request, listener); + }, listener::onFailure)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java index dfcf6445abf..9af3e9a3315 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/ThreadedActionListener.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.support; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.settings.Settings; @@ -86,21 +87,16 @@ public final class ThreadedActionListener implements ActionListener(listener) { @Override public boolean isForceExecution() { return forceExecution; } @Override - protected void doRun() throws Exception { + protected void doRun() { listener.onResponse(response); } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } }); } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java index 87c9e153241..15daaf786b6 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastAction.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.support.broadcast; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -36,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -287,45 +287,25 @@ public abstract class TransportBroadcastAction< @Override public void messageReceived(ShardRequest request, TransportChannel channel, Task task) throws Exception { - asyncShardOperation(request, task, new ActionListener() { - @Override - public void onResponse(ShardResponse response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); + asyncShardOperation(request, task, + ActionListener.wrap(channel::sendResponse, e -> { + try { + channel.sendResponse(e); + } catch (Exception e1) { + logger.warn(() -> new ParameterizedMessage( + "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); + } } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn(() -> new ParameterizedMessage( - "Failed to send error response for action [{}] and request [{}]", actionName, request), e1); - } - } - }); + )); } } protected void asyncShardOperation(ShardRequest request, Task task, ActionListener listener) { - transportService.getThreadPool().executor(getExecutor(request)).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + transportService.getThreadPool().executor(shardExecutor).execute(new ActionRunnable(listener) { @Override protected void doRun() throws Exception { listener.onResponse(shardOperation(request, task)); } }); } - - protected String getExecutor(ShardRequest request) { - return shardExecutor; - } - } diff --git a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index c575c3b2338..d1d7b6ffac5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -254,27 +254,16 @@ public abstract class TransportInstanceSingleOperationAction< @Override public void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception { - shardOperation(request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); + shardOperation(request, + ActionListener.wrap(channel::sendResponse, e -> { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn("failed to send response for get", inner); + } } - } - - @Override - public void onFailure(Exception e) { - try { - channel.sendResponse(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.warn("failed to send response for get", inner); - } - } - }); - + )); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java index 3c2e7f9a49e..81763c88a6b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/single/shard/TransportSingleShardAction.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.support.single.shard; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ChannelActionListener; @@ -40,7 +41,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.LoggerMessageFormat; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -107,12 +107,7 @@ public abstract class TransportSingleShardAction listener) throws IOException { - threadPool.executor(getExecutor(request, shardId)).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + threadPool.executor(getExecutor(request, shardId)).execute(new ActionRunnable(listener) { @Override protected void doRun() throws Exception { listener.onResponse(shardOperation(request, shardId)); diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java index c2f9872ca5c..8d80a15beb1 100644 --- a/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/tasks/TransportTasksAction.java @@ -329,19 +329,8 @@ public abstract class TransportTasksAction< @Override public void messageReceived(final NodeTaskRequest request, final TransportChannel channel, Task task) throws Exception { - nodeOperation(request, new ActionListener() { - @Override - public void onResponse( - TransportTasksAction.NodeTasksResponse response) { - try { - channel.sendResponse(response); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { + nodeOperation(request, ActionListener.wrap(channel::sendResponse, + e -> { try { channel.sendResponse(e); } catch (IOException e1) { @@ -349,11 +338,10 @@ public abstract class TransportTasksAction< logger.warn("Failed to send failure", e1); } } - }); + )); } } - private class NodeTaskRequest extends TransportRequest { private TasksRequest tasksRequest; diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 0b22f5d6606..bf950ac23df 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.SearchType; @@ -39,7 +40,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.core.internal.io.IOUtils; @@ -302,21 +302,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } public void executeDfsPhase(ShardSearchRequest request, SearchTask task, ActionListener listener) { - rewriteShardRequest(request, new ActionListener() { - @Override - public void onResponse(ShardSearchRequest request) { - try { - listener.onResponse(executeDfsPhase(request, task)); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + rewriteShardRequest(request, ActionListener.map(listener, r -> executeDfsPhase(r, task))); } private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchTask task) throws IOException { @@ -351,30 +337,11 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv } public void executeQueryPhase(ShardSearchRequest request, SearchTask task, ActionListener listener) { - rewriteShardRequest(request, new ActionListener() { - @Override - public void onResponse(ShardSearchRequest request) { - try { - listener.onResponse(executeQueryPhase(request, task)); - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); + rewriteShardRequest(request, ActionListener.map(listener, r -> executeQueryPhase(r, task))); } private void runAsync(long id, Supplier executable, ActionListener listener) { - getExecutor(id).execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + getExecutor(id).execute(new ActionRunnable(listener) { @Override protected void doRun() { listener.onResponse(executable.get()); @@ -1058,12 +1025,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv ActionListener actionListener = ActionListener.wrap(r -> // now we need to check if there is a pending refresh and register shard.awaitShardSearchActive(b -> - executor.execute(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - + executor.execute(new ActionRunnable(listener) { @Override protected void doRun() { listener.onResponse(request); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java index fc7ebe4b964..9e49d06f2b0 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java @@ -84,10 +84,7 @@ final class TransportKeepAlive implements Closeable { for (TcpChannel channel : nodeChannels) { scheduledPing.addChannel(channel); - - channel.addCloseListener(ActionListener.wrap(() -> { - scheduledPing.removeChannel(channel); - })); + channel.addCloseListener(ActionListener.wrap(() -> scheduledPing.removeChannel(channel))); } } diff --git a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java index ad2447cb7b3..e0ef29bf7f4 100644 --- a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java @@ -65,19 +65,17 @@ public class RejectionActionIT extends ESIntegTestCase { client().prepareSearch("test") .setSearchType(SearchType.QUERY_THEN_FETCH) .setQuery(QueryBuilders.matchQuery("field", "1")) - .execute(new ActionListener() { + .execute(new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(SearchResponse searchResponse) { responses.add(searchResponse); - latch.countDown(); } @Override public void onFailure(Exception e) { responses.add(e); - latch.countDown(); } - }); + }, latch)); } latch.await(); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index ec8af36dbf2..190f85d635b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -470,17 +470,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { connectNodes(testNodes); CountDownLatch checkLatch = new CountDownLatch(1); CountDownLatch responseLatch = new CountDownLatch(1); - Task task = startBlockingTestNodesAction(checkLatch, new ActionListener() { - @Override - public void onResponse(NodesResponse nodeResponses) { - responseLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - responseLatch.countDown(); - } - }); + Task task = startBlockingTestNodesAction(checkLatch, ActionListener.wrap(responseLatch::countDown)); String actionName = "internal:testAction"; // only pick the main action // Try to cancel main task using action name diff --git a/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java index 55c39f735ce..bcb4a1200b7 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ClearScrollControllerTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -42,32 +43,24 @@ import java.util.concurrent.atomic.AtomicInteger; public class ClearScrollControllerTests extends ESTestCase { - public void testClearAll() throws IOException, InterruptedException { + public void testClearAll() throws InterruptedException { DiscoveryNode node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNode node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(3, clearScrollResponse.getNumFreed()); - assertTrue(clearScrollResponse.isSucceeded()); - } finally { - latch.countDown(); - } + assertEquals(3, clearScrollResponse.getNumFreed()); + assertTrue(clearScrollResponse.isSucceeded()); } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { @Override @@ -112,27 +105,18 @@ public class ClearScrollControllerTests extends ESTestCase { String scrollId = TransportSearchHelper.buildScrollId(array); DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); - assertTrue(clearScrollResponse.isSucceeded()); - } finally { - latch.countDown(); - } - + assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); + assertTrue(clearScrollResponse.isSucceeded()); } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { @@ -185,32 +169,22 @@ public class ClearScrollControllerTests extends ESTestCase { DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); CountDownLatch latch = new CountDownLatch(1); - ActionListener listener = new ActionListener() { + ActionListener listener = new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(ClearScrollResponse clearScrollResponse) { - try { - assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); - if (numFailures.get() > 0) { - assertFalse(clearScrollResponse.isSucceeded()); - } else { - assertTrue(clearScrollResponse.isSucceeded()); - } - - } finally { - latch.countDown(); + assertEquals(numFreed.get(), clearScrollResponse.getNumFreed()); + if (numFailures.get() > 0) { + assertFalse(clearScrollResponse.isSucceeded()); + } else { + assertTrue(clearScrollResponse.isSucceeded()); } - } @Override public void onFailure(Exception e) { - try { - throw new AssertionError(e); - } finally { - latch.countDown(); - } + throw new AssertionError(e); } - }; + }, latch); List nodesInvoked = new CopyOnWriteArrayList<>(); SearchTransportService searchTransportService = new SearchTransportService(null, null) { diff --git a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java index f222bcc015c..96d057f50c4 100644 --- a/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/TransportActionFilterChainTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.Node; import org.elasticsearch.tasks.Task; @@ -65,7 +66,7 @@ public class TransportActionFilterChainTests extends ESTestCase { terminate(threadPool); } - public void testActionFiltersRequest() throws ExecutionException, InterruptedException { + public void testActionFiltersRequest() throws InterruptedException { int numFilters = randomInt(10); Set orders = new HashSet<>(numFilters); while (orders.size() < numFilters) { @@ -139,7 +140,7 @@ public class TransportActionFilterChainTests extends ESTestCase { } } - public void testTooManyContinueProcessingRequest() throws ExecutionException, InterruptedException { + public void testTooManyContinueProcessingRequest() throws InterruptedException { final int additionalContinueCount = randomInt(10); RequestTestFilter testFilter = new RequestTestFilter(randomInt(), new RequestCallback() { @@ -169,19 +170,17 @@ public class TransportActionFilterChainTests extends ESTestCase { final AtomicInteger responses = new AtomicInteger(); final List failures = new CopyOnWriteArrayList<>(); - transportAction.execute(new TestRequest(), new ActionListener() { + transportAction.execute(new TestRequest(), new LatchedActionListener<>(new ActionListener() { @Override public void onResponse(TestResponse testResponse) { responses.incrementAndGet(); - latch.countDown(); } @Override public void onFailure(Exception e) { failures.add(e); - latch.countDown(); } - }); + }, latch)); if (!latch.await(10, TimeUnit.SECONDS)) { fail("timeout waiting for the filter to notify the listener as many times as expected"); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index 1a7e5a73e75..57b30d3484b 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -369,17 +369,7 @@ public class TransportWriteActionTests extends ESTestCase { CountDownLatch completionLatch = new CountDownLatch(1); threadPool.generic().execute(() -> { waitForBarrier.run(); - replicaResult.respond(new ActionListener() { - @Override - public void onResponse(TransportResponse.Empty empty) { - completionLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - completionLatch.countDown(); - } - }); + replicaResult.respond(ActionListener.wrap(completionLatch::countDown)); }); if (randomBoolean()) { threadPool.generic().execute(() -> { diff --git a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index aeb4d9b3a9b..2ea6567c9f8 100644 --- a/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/test/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -23,9 +23,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.persistent.TestPersistentTasksPlugin; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; @@ -72,17 +70,7 @@ public class EnableAssignmentDeciderIT extends ESIntegTestCase { for (int i = 0; i < numberOfTasks; i++) { PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); service.sendStartRequest("task_" + i, TestPersistentTasksExecutor.NAME, new TestParams(randomAlphaOfLength(10)), - new ActionListener>() { - @Override - public void onResponse(PersistentTask task) { - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - latch.countDown(); - } - }); + ActionListener.wrap(latch::countDown)); } latch.await();