From f037edb8e3a9d9e38bfe1cb376d178dc30c67416 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 15 Sep 2018 09:16:33 -0400 Subject: [PATCH 01/27] Move CCR monitoring tests to ccr sub-project (#33730) This commit moves the CCR monitoring tests from the monitoring sub-project to the ccr sub-project. --- x-pack/plugin/ccr/build.gradle | 1 + .../xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java | 0 .../monitoring/collector/ccr/CcrStatsMonitoringDocTests.java | 0 3 files changed, 1 insertion(+) rename x-pack/plugin/{monitoring => ccr}/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java (100%) rename x-pack/plugin/{monitoring => ccr}/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java (100%) diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index 0b1f889a2c1..ea8aa897777 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -49,6 +49,7 @@ dependencies { compileOnly project(path: xpackModule('core'), configuration: 'default') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('monitoring'), configuration: 'testArtifacts') } dependencyLicenses { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java similarity index 100% rename from x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java rename to x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsCollectorTests.java diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java similarity index 100% rename from x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java rename to x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/monitoring/collector/ccr/CcrStatsMonitoringDocTests.java From aa56892f2fb8416e32bbcea3366e0c4e0146601d Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 15 Sep 2018 09:18:15 -0400 Subject: [PATCH 02/27] Move CCR REST tests to ccr sub-project (#33731) This commit moves the CCR REST tests to the ccr sub-project as another step towards running :x-pack:plugin:ccr:check giving us full coverage on CCR. 
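Note for reviewers: the integTestCluster block added below runs the cluster with xpack.security.enabled set to true, so the build's default unauthenticated health probe would be rejected with a 401; the custom waitCondition therefore polls _cluster/health with the credentials that the setup-ccr-user command creates. The following is a minimal standalone sketch of that probe in plain Java, not part of the patch: the class name, the localhost:9200 address, and the hard-coded node count of 1 are illustrative assumptions (the build script substitutes the real node URI and numNodes).

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class ClusterHealthProbe {
        public static void main(String[] args) throws IOException {
            // wait_for_nodes=>=1 mirrors the wait_for_nodes=>=${numNodes} expression in build.gradle.
            String query = "wait_for_nodes=" + URLEncoder.encode(">=1", StandardCharsets.UTF_8.name())
                    + "&wait_for_status=yellow";
            URL url = new URL("http://localhost:9200/_cluster/health?" + query);
            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
            // Basic credentials for the ccr-user that the setup-ccr-user setupCommand registers.
            String token = Base64.getEncoder()
                    .encodeToString("ccr-user:ccr-user-password".getBytes(StandardCharsets.UTF_8));
            connection.setRequestProperty("Authorization", "Basic " + token);
            connection.setConnectTimeout(5_000);
            connection.setReadTimeout(5_000);
            // 200 means the health criteria were met; the Gradle task would otherwise retry.
            int status = connection.getResponseCode();
            System.out.println(status == 200 ? "cluster ready" : "not ready yet: HTTP " + status);
        }
    }

The ant.get call in the patch does the same thing, with retries and the response body written to wait.success so the Gradle task can test for the file's existence.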
--- x-pack/plugin/ccr/build.gradle | 24 ++++++++++- .../elasticsearch/xpack/ccr/CcrRestIT.java | 42 +++++++++++++++++++ .../api/ccr.create_and_follow_index.json | 0 .../api/ccr.delete_auto_follow_pattern.json | 0 .../rest-api-spec/api/ccr.follow_index.json | 0 .../api/ccr.put_auto_follow_pattern.json | 0 .../rest-api-spec/api/ccr.stats.json | 0 .../rest-api-spec/api/ccr.unfollow_index.json | 0 .../rest-api-spec/test/ccr/auto_follow.yml | 0 .../test/ccr/follow_and_unfollow.yml | 0 .../rest-api-spec/test/ccr/stats.yml | 0 11 files changed, 64 insertions(+), 2 deletions(-) create mode 100644 x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json (100%) rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json (100%) rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/api/ccr.follow_index.json (100%) rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json (100%) rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/api/ccr.stats.json (100%) rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json (100%) rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml (100%) rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml (100%) rename x-pack/plugin/{ => ccr}/src/test/resources/rest-api-spec/test/ccr/stats.yml (100%) diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index ea8aa897777..f4c7c09c19e 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -14,8 +14,6 @@ esplugin { } archivesBaseName = 'x-pack-ccr' -integTest.enabled = false - compileJava.options.compilerArgs << "-Xlint:-try" compileTestJava.options.compilerArgs << "-Xlint:-try" @@ -29,9 +27,31 @@ task internalClusterTest(type: RandomizedTestingTask, classpath = project.test.classpath testClassesDirs = project.test.testClassesDirs include '**/*IT.class' + exclude '**/CcrRestIT.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' } +integTestCluster { + distribution 'zip' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.security.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + // TODO: reduce the need for superuser here + setupCommand 'setup-ccr-user', + 'bin/elasticsearch-users', 'useradd', 'ccr-user', '-p', 'ccr-user-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'ccr-user', + password: 'ccr-user-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + check.dependsOn internalClusterTest internalClusterTest.mustRunAfter test diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java new file mode 100644 index 00000000000..45998433d33 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.ccr;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.elasticsearch.common.settings.SecureString;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
+import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
+import org.elasticsearch.xpack.test.rest.XPackRestTestHelper;
+import org.junit.After;
+
+import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
+
+public class CcrRestIT extends ESClientYamlSuiteTestCase {
+
+    public CcrRestIT(final ClientYamlTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws Exception {
+        return ESClientYamlSuiteTestCase.createParameters();
+    }
+
+    @Override
+    protected Settings restClientSettings() {
+        final String ccrUserAuthHeaderValue = basicAuthHeaderValue("ccr-user", new SecureString("ccr-user-password".toCharArray()));
+        return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", ccrUserAuthHeaderValue).build();
+    }
+
+    @After
+    public void cleanup() throws Exception {
+        XPackRestTestHelper.waitForPendingTasks(adminClient());
+    }
+
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json
similarity index 100%
rename from x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json
rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json
similarity index 100%
rename from x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json
rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.follow_index.json
similarity index 100%
rename from x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json
rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.follow_index.json
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json
similarity index 100%
rename from x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json
rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.stats.json
similarity index 100%
rename from x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json
rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.stats.json
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json
similarity index
100% rename from x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml similarity index 100% rename from x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml similarity index 100% rename from x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml b/x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/stats.yml similarity index 100% rename from x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml rename to x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/stats.yml From 73417bf09afdfa6eeb7a7d620218b843e8d2653e Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Sat, 15 Sep 2018 10:18:59 -0400 Subject: [PATCH 03/27] Move CCR REST tests to a sub-project of ccr This commit moves these REST tests (possibly temporarily) to a sub-project of ccr. We do this (again, possibly temporarily) to keep them within the ccr sub-project yet there are changes within 6.x that prevent these from being in the top-level project (the cluster formation tasks are trying to install x-pack-ccr into the integ-test-zip). Therefore, we isolate these for now until we can understand why there are differences between 6.x and master. 
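A side note on authentication, since this move keeps the secured-cluster setup: the relocated CcrRestIT supplies its credentials through restClientSettings() using the basicAuthHeaderValue helper. That helper builds a standard HTTP Basic Authorization value; the sketch below, which is not part of the patch, shows the equivalent construction. The class name is illustrative, and the real helper in UsernamePasswordToken takes a SecureString and zeroes its working buffers after encoding.

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public final class BasicAuthSketch {

        // Simplified stand-in for UsernamePasswordToken.basicAuthHeaderValue().
        static String basicAuthHeaderValue(String username, String password) {
            String token = Base64.getEncoder()
                    .encodeToString((username + ":" + password).getBytes(StandardCharsets.UTF_8));
            return "Basic " + token;
        }

        public static void main(String[] args) {
            // The same credentials the setup-ccr-user setupCommand registers on the test cluster.
            System.out.println(basicAuthHeaderValue("ccr-user", "ccr-user-password"));
            // Prints: Basic Y2NyLXVzZXI6Y2NyLXVzZXItcGFzc3dvcmQ=
        }
    }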
--- x-pack/plugin/ccr/build.gradle | 24 ++----------- x-pack/plugin/ccr/qa/rest/build.gradle | 36 +++++++++++++++++++ .../elasticsearch/xpack/ccr/CcrRestIT.java | 0 .../api/ccr.create_and_follow_index.json | 0 .../api/ccr.delete_auto_follow_pattern.json | 0 .../rest-api-spec/api/ccr.follow_index.json | 0 .../api/ccr.put_auto_follow_pattern.json | 0 .../rest-api-spec/api/ccr.stats.json | 0 .../rest-api-spec/api/ccr.unfollow_index.json | 0 .../rest-api-spec/test/ccr/auto_follow.yml | 0 .../test/ccr/follow_and_unfollow.yml | 0 .../rest-api-spec/test/ccr/stats.yml | 0 12 files changed, 38 insertions(+), 22 deletions(-) create mode 100644 x-pack/plugin/ccr/qa/rest/build.gradle rename x-pack/plugin/ccr/{ => qa/rest}/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/api/ccr.follow_index.json (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/api/ccr.stats.json (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml (100%) rename x-pack/plugin/ccr/{ => qa/rest}/src/test/resources/rest-api-spec/test/ccr/stats.yml (100%) diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle index f4c7c09c19e..ea8aa897777 100644 --- a/x-pack/plugin/ccr/build.gradle +++ b/x-pack/plugin/ccr/build.gradle @@ -14,6 +14,8 @@ esplugin { } archivesBaseName = 'x-pack-ccr' +integTest.enabled = false + compileJava.options.compilerArgs << "-Xlint:-try" compileTestJava.options.compilerArgs << "-Xlint:-try" @@ -27,31 +29,9 @@ task internalClusterTest(type: RandomizedTestingTask, classpath = project.test.classpath testClassesDirs = project.test.testClassesDirs include '**/*IT.class' - exclude '**/CcrRestIT.class' systemProperty 'es.set.netty.runtime.available.processors', 'false' } -integTestCluster { - distribution 'zip' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.monitoring.enabled', 'false' - setting 'xpack.security.enabled', 'true' - setting 'xpack.license.self_generated.type', 'trial' - // TODO: reduce the need for superuser here - setupCommand 'setup-ccr-user', - 'bin/elasticsearch-users', 'useradd', 'ccr-user', '-p', 'ccr-user-password', '-r', 'superuser' - waitCondition = { node, ant -> - File tmpFile = new File(node.cwd, 'wait.success') - ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", - dest: tmpFile.toString(), - username: 'ccr-user', - password: 'ccr-user-password', - ignoreerrors: true, - retries: 10) - return tmpFile.exists() - } -} - check.dependsOn internalClusterTest internalClusterTest.mustRunAfter test diff --git a/x-pack/plugin/ccr/qa/rest/build.gradle b/x-pack/plugin/ccr/qa/rest/build.gradle new file mode 100644 index 00000000000..cfd24123f42 --- /dev/null +++ b/x-pack/plugin/ccr/qa/rest/build.gradle @@ -0,0 +1,36 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 
'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ccr'), configuration: 'runtime') +} + +task restTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +restTestCluster { + distribution 'zip' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.security.enabled', 'true' + setting 'xpack.license.self_generated.type', 'trial' + // TODO: reduce the need for superuser here + setupCommand 'setup-ccr-user', + 'bin/elasticsearch-users', 'useradd', 'ccr-user', '-p', 'ccr-user-password', '-r', 'superuser' + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'ccr-user', + password: 'ccr-user-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +check.dependsOn restTest +test.enabled = false diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java b/x-pack/plugin/ccr/qa/rest/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java similarity index 100% rename from x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java rename to x-pack/plugin/ccr/qa/rest/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java diff --git a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json diff --git a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json diff --git a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.follow_index.json b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.follow_index.json similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.follow_index.json rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.follow_index.json diff --git a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json diff --git a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.stats.json b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.stats.json similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.stats.json rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.stats.json diff --git 
a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json diff --git a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/auto_follow.yml diff --git a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml diff --git a/x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/stats.yml similarity index 100% rename from x-pack/plugin/ccr/src/test/resources/rest-api-spec/test/ccr/stats.yml rename to x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/stats.yml From db40315afb9d491182b3f7c0745b985718ffd1d7 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Sun, 16 Sep 2018 11:54:55 +0100 Subject: [PATCH 04/27] [HLRC][ML] Add ML get datafeed API to HLRC (#33715) Relates #29827 --- .../client/MLRequestConverters.java | 19 ++ .../client/MachineLearningClient.java | 47 +++- .../client/ml/CloseJobRequest.java | 4 +- .../client/ml/DeleteForecastRequest.java | 2 +- .../client/ml/GetDatafeedRequest.java | 144 ++++++++++++ .../client/ml/GetDatafeedResponse.java | 89 ++++++++ .../client/ml/GetJobRequest.java | 10 +- .../client/ml/GetJobStatsRequest.java | 8 +- .../client/ml/GetOverallBucketsRequest.java | 6 +- .../client/ml/PostDataRequest.java | 2 +- .../client/MLRequestConvertersTests.java | 18 ++ .../client/MachineLearningIT.java | 81 +++++++ .../MlClientDocumentationIT.java | 209 +++++++++++------- .../client/ml/GetDatafeedRequestTests.java | 70 ++++++ .../client/ml/GetDatafeedResponseTests.java | 58 +++++ .../client/ml/GetJobRequestTests.java | 2 +- .../ml/datafeed/DatafeedConfigTests.java | 6 +- .../high-level/ml/get-datafeed.asciidoc | 56 +++++ .../high-level/supported-apis.asciidoc | 2 + 19 files changed, 736 insertions(+), 97 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDatafeedRequest.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDatafeedResponse.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetDatafeedRequestTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetDatafeedResponseTests.java create mode 100644 docs/java-rest/high-level/ml/get-datafeed.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 731a4d41378..1a681822eca 100644 --- 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -35,6 +35,7 @@ import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetCategoriesRequest; +import org.elasticsearch.client.ml.GetDatafeedRequest; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobStatsRequest; @@ -197,6 +198,24 @@ final class MLRequestConverters { return request; } + static Request getDatafeed(GetDatafeedRequest getDatafeedRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("datafeeds") + .addPathPart(Strings.collectionToCommaDelimitedString(getDatafeedRequest.getDatafeedIds())) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + if (getDatafeedRequest.isAllowNoDatafeeds() != null) { + params.putParam(GetDatafeedRequest.ALLOW_NO_DATAFEEDS.getPreferredName(), + Boolean.toString(getDatafeedRequest.isAllowNoDatafeeds())); + } + + return request; + } + static Request deleteDatafeed(DeleteDatafeedRequest deleteDatafeedRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 0fd397bba89..caaf1326dbd 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -33,6 +33,8 @@ import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetCategoriesResponse; +import org.elasticsearch.client.ml.GetDatafeedRequest; +import org.elasticsearch.client.ml.GetDatafeedResponse; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetInfluencersResponse; import org.elasticsearch.client.ml.GetJobRequest; @@ -466,8 +468,8 @@ public final class MachineLearningClient { * For additional info * see ML PUT datafeed documentation * - * @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings - * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param request The request containing the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener Listener to be notified upon request completion */ public void putDatafeedAsync(PutDatafeedRequest request, RequestOptions options, ActionListener listener) { @@ -479,6 +481,47 @@ public final class MachineLearningClient { Collections.emptySet()); } + /** + * Gets one or more Machine Learning datafeed configuration info. + * + *
+     * For additional info
+     * see ML GET datafeed documentation
+     *
+     * @param request {@link GetDatafeedRequest} Request containing a list of datafeedId(s) and additional options
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return {@link GetDatafeedResponse} response object containing
+     * the {@link org.elasticsearch.client.ml.datafeed.DatafeedConfig} objects and the number of datafeeds found
+     * @throws IOException when there is a serialization issue sending the request or receiving the response
+     */
+    public GetDatafeedResponse getDatafeed(GetDatafeedRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request,
+                MLRequestConverters::getDatafeed,
+                options,
+                GetDatafeedResponse::fromXContent,
+                Collections.emptySet());
+    }
+
+    /**
+     * Gets one or more Machine Learning datafeed configuration info, asynchronously.
+     *
+     * For additional info
+     * see ML GET datafeed documentation
+     *
+     * @param request {@link GetDatafeedRequest} Request containing a list of datafeedId(s) and additional options
+     * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener Listener to be notified with {@link GetDatafeedResponse} upon request completion
+     */
+    public void getDatafeedAsync(GetDatafeedRequest request, RequestOptions options, ActionListener<GetDatafeedResponse> listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request,
+                MLRequestConverters::getDatafeed,
+                options,
+                GetDatafeedResponse::fromXContent,
+                listener,
+                Collections.emptySet());
+    }
+
     /**
      * Deletes the given Machine Learning Datafeed
      *
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java index 19f3df8e432..aa6b4fe6c9a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/CloseJobRequest.java @@ -136,9 +136,9 @@ public class CloseJobRequest extends ActionRequest implements ToXContentObject { /** * Whether to ignore if a wildcard expression matches no jobs. * - * This includes `_all` string or when no jobs have been specified + * This includes {@code _all} string or when no jobs have been specified * - * @param allowNoJobs When {@code true} ignore if wildcard or `_all` matches no jobs. Defaults to {@code true} + * @param allowNoJobs When {@code true} ignore if wildcard or {@code _all} matches no jobs. Defaults to {@code true} */ public void setAllowNoJobs(boolean allowNoJobs) { this.allowNoJobs = allowNoJobs; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java index f7c8a6c0733..f1d87fa45d6 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteForecastRequest.java @@ -109,7 +109,7 @@ public class DeleteForecastRequest extends ActionRequest implements ToXContentOb } /** - * Sets the `allow_no_forecasts` field. + * Sets the value of "allow_no_forecasts". * * @param allowNoForecasts when {@code true} no error is thrown when {@link DeleteForecastRequest#ALL} does not find any forecasts */ diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDatafeedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDatafeedRequest.java new file mode 100644 index 00000000000..d9750d9616d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDatafeedRequest.java @@ -0,0 +1,144 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Request object to get {@link DatafeedConfig} objects with the matching {@code datafeedId}s.
+ *
+ * {@code _all} explicitly gets all the datafeeds in the cluster
+ * An empty request (no {@code datafeedId}s) implicitly gets all the datafeeds in the cluster
+ */
+public class GetDatafeedRequest extends ActionRequest implements ToXContentObject {
+
+    public static final ParseField DATAFEED_IDS = new ParseField("datafeed_ids");
+    public static final ParseField ALLOW_NO_DATAFEEDS = new ParseField("allow_no_datafeeds");
+
+    private static final String ALL_DATAFEEDS = "_all";
+    private final List<String> datafeedIds;
+    private Boolean allowNoDatafeeds;
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<GetDatafeedRequest, Void> PARSER = new ConstructingObjectParser<>(
+            "get_datafeed_request",
+            true, a -> new GetDatafeedRequest(a[0] == null ? new ArrayList<>() : (List<String>) a[0]));
+
+    static {
+        PARSER.declareStringArray(ConstructingObjectParser.optionalConstructorArg(), DATAFEED_IDS);
+        PARSER.declareBoolean(GetDatafeedRequest::setAllowNoDatafeeds, ALLOW_NO_DATAFEEDS);
+    }
+
+    /**
+     * Helper method to create a query that will get ALL datafeeds
+     * @return new {@link GetDatafeedRequest} object searching for the datafeedId "_all"
+     */
+    public static GetDatafeedRequest getAllDatafeedsRequest() {
+        return new GetDatafeedRequest(ALL_DATAFEEDS);
+    }
+
+    /**
+     * Get the specified {@link DatafeedConfig} configurations via their unique datafeedIds
+     * @param datafeedIds must not contain any null values
+     */
+    public GetDatafeedRequest(String... datafeedIds) {
+        this(Arrays.asList(datafeedIds));
+    }
+
+    GetDatafeedRequest(List<String> datafeedIds) {
+        if (datafeedIds.stream().anyMatch(Objects::isNull)) {
+            throw new NullPointerException("datafeedIds must not contain null values");
+        }
+        this.datafeedIds = new ArrayList<>(datafeedIds);
+    }
+
+    /**
+     * All the datafeedIds for which to get configuration information
+     */
+    public List<String> getDatafeedIds() {
+        return datafeedIds;
+    }
+
+    /**
+     * Whether to ignore if a wildcard expression matches no datafeeds.
+     *
+     * @param allowNoDatafeeds If this is {@code false}, then an error is returned when a wildcard (or {@code _all})
+     * does not match any datafeeds
+     */
+    public void setAllowNoDatafeeds(boolean allowNoDatafeeds) {
+        this.allowNoDatafeeds = allowNoDatafeeds;
+    }
+
+    public Boolean isAllowNoDatafeeds() {
+        return allowNoDatafeeds;
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(datafeedIds, allowNoDatafeeds);
+    }
+
+    @Override
+    public boolean equals(Object other) {
+        if (this == other) {
+            return true;
+        }
+
+        if (other == null || other.getClass() != getClass()) {
+            return false;
+        }
+
+        GetDatafeedRequest that = (GetDatafeedRequest) other;
+        return Objects.equals(datafeedIds, that.datafeedIds) &&
+                Objects.equals(allowNoDatafeeds, that.allowNoDatafeeds);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+
+        if (datafeedIds.isEmpty() == false) {
+            builder.field(DATAFEED_IDS.getPreferredName(), datafeedIds);
+        }
+
+        if (allowNoDatafeeds != null) {
+            builder.field(ALLOW_NO_DATAFEEDS.getPreferredName(), allowNoDatafeeds);
+        }
+
+        builder.endObject();
+        return builder;
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDatafeedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDatafeedResponse.java
new file mode 100644
index 00000000000..0aadd7a5766
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetDatafeedResponse.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
+/**
+ * Contains a {@link List} of the found {@link DatafeedConfig} objects and the total count found
+ */
+public class GetDatafeedResponse extends AbstractResultResponse<DatafeedConfig> {
+
+    public static final ParseField RESULTS_FIELD = new ParseField("datafeeds");
+
+    @SuppressWarnings("unchecked")
+    public static final ConstructingObjectParser<GetDatafeedResponse, Void> PARSER =
+            new ConstructingObjectParser<>("get_datafeed_response", true,
+                    a -> new GetDatafeedResponse((List<DatafeedConfig.Builder>) a[0], (long) a[1]));
+
+    static {
+        PARSER.declareObjectArray(constructorArg(), DatafeedConfig.PARSER, RESULTS_FIELD);
+        PARSER.declareLong(constructorArg(), AbstractResultResponse.COUNT);
+    }
+
+    GetDatafeedResponse(List<DatafeedConfig.Builder> datafeedBuilders, long count) {
+        super(RESULTS_FIELD, datafeedBuilders.stream().map(DatafeedConfig.Builder::build).collect(Collectors.toList()), count);
+    }
+
+    /**
+     * The collection of {@link DatafeedConfig} objects found in the query
+     */
+    public List<DatafeedConfig> datafeeds() {
+        return results;
+    }
+
+    public static GetDatafeedResponse fromXContent(XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(results, count);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+
+        GetDatafeedResponse other = (GetDatafeedResponse) obj;
+        return Objects.equals(results, other.results) && count == other.count;
+    }
+
+    @Override
+    public final String toString() {
+        return Strings.toString(this);
+    }
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java
index 3de7037e5c8..46153061e28 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobRequest.java
@@ -33,11 +33,11 @@ import java.util.List;
 import java.util.Objects;
 
 /**
- * Request object to get {@link Job} objects with the matching `jobId`s or
- * `groupName`s.
+ * Request object to get {@link Job} objects with the matching {@code jobId}s or
+ * {@code groupName}s.
  *
- * `_all` explicitly gets all the jobs in the cluster
- * An empty request (no `jobId`s) implicitly gets all the jobs in the cluster
+ * {@code _all} explicitly gets all the jobs in the cluster
+ * An empty request (no {@code jobId}s) implicitly gets all the jobs in the cluster
  */
 public class GetJobRequest extends ActionRequest implements ToXContentObject {
 
@@ -91,7 +91,7 @@ public class GetJobRequest extends ActionRequest implements ToXContentObject {
 
     /**
      * Whether to ignore if a wildcard expression matches no jobs.
* - * @param allowNoJobs If this is {@code false}, then an error is returned when a wildcard (or `_all`) does not match any jobs + * @param allowNoJobs If this is {@code false}, then an error is returned when a wildcard (or {@code _all}) does not match any jobs */ public void setAllowNoJobs(boolean allowNoJobs) { this.allowNoJobs = allowNoJobs; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java index d8eb350755d..fc3af822163 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java @@ -38,8 +38,8 @@ import java.util.Objects; /** * Request object to get {@link org.elasticsearch.client.ml.job.stats.JobStats} by their respective jobIds * - * `_all` explicitly gets all the jobs' statistics in the cluster - * An empty request (no `jobId`s) implicitly gets all the jobs' statistics in the cluster + * {@code _all} explicitly gets all the jobs' statistics in the cluster + * An empty request (no {@code jobId}s) implicitly gets all the jobs' statistics in the cluster */ public class GetJobStatsRequest extends ActionRequest implements ToXContentObject { @@ -100,9 +100,9 @@ public class GetJobStatsRequest extends ActionRequest implements ToXContentObjec /** * Whether to ignore if a wildcard expression matches no jobs. * - * This includes `_all` string or when no jobs have been specified + * This includes {@code _all} string or when no jobs have been specified * - * @param allowNoJobs When {@code true} ignore if wildcard or `_all` matches no jobs. Defaults to {@code true} + * @param allowNoJobs When {@code true} ignore if wildcard or {@code _all} matches no jobs. Defaults to {@code true} */ public void setAllowNoJobs(boolean allowNoJobs) { this.allowNoJobs = allowNoJobs; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsRequest.java index 97bde11d8c6..490bdd4fbae 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetOverallBucketsRequest.java @@ -109,7 +109,7 @@ public class GetOverallBucketsRequest extends ActionRequest implements ToXConten } /** - * Sets the value of `top_n`. + * Sets the value of "top_n". * @param topN The number of top job bucket scores to be used in the overall_score calculation. Defaults to 1. */ public void setTopN(Integer topN) { @@ -121,7 +121,7 @@ public class GetOverallBucketsRequest extends ActionRequest implements ToXConten } /** - * Sets the value of `bucket_span`. + * Sets the value of "bucket_span". * @param bucketSpan The span of the overall buckets. Must be greater or equal to the largest job’s bucket_span. * Defaults to the largest job’s bucket_span. */ @@ -197,7 +197,7 @@ public class GetOverallBucketsRequest extends ActionRequest implements ToXConten /** * Whether to ignore if a wildcard expression matches no jobs. 
* - * If this is `false`, then an error is returned when a wildcard (or `_all`) does not match any jobs + * If this is {@code false}, then an error is returned when a wildcard (or {@code _all}) does not match any jobs */ public Boolean isAllowNoJobs() { return allowNoJobs; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataRequest.java index cc015fc4837..519ac5e0051 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostDataRequest.java @@ -38,7 +38,7 @@ import java.util.Map; import java.util.Objects; /** - * POJO for posting data to a Machine Learning job + * Request to post data to a Machine Learning job */ public class PostDataRequest extends ActionRequest implements ToXContentObject { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index e0a9640ef40..61122901b86 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetCategoriesRequest; +import org.elasticsearch.client.ml.GetDatafeedRequest; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobStatsRequest; @@ -227,6 +228,23 @@ public class MLRequestConvertersTests extends ESTestCase { } } + public void testGetDatafeed() { + GetDatafeedRequest getDatafeedRequest = new GetDatafeedRequest(); + + Request request = MLRequestConverters.getDatafeed(getDatafeedRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/datafeeds", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("allow_no_datafeeds")); + + getDatafeedRequest = new GetDatafeedRequest("feed-1", "feed-*"); + getDatafeedRequest.setAllowNoDatafeeds(true); + request = MLRequestConverters.getDatafeed(getDatafeedRequest); + + assertEquals("/_xpack/ml/datafeeds/feed-1,feed-*", request.getEndpoint()); + assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_datafeeds")); + } + public void testDeleteDatafeed() { String datafeedId = randomAlphaOfLength(10); DeleteDatafeedRequest deleteDatafeedRequest = new DeleteDatafeedRequest(datafeedId); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 598f29eec92..5349378e335 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -32,6 +32,8 @@ import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.FlushJobResponse; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.ForecastJobResponse; +import org.elasticsearch.client.ml.GetDatafeedRequest; +import 
org.elasticsearch.client.ml.GetDatafeedResponse; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobResponse; import org.elasticsearch.client.ml.GetJobStatsRequest; @@ -58,6 +60,7 @@ import org.elasticsearch.client.ml.job.config.JobState; import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.stats.JobStats; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.RestStatus; import org.junit.After; import java.io.IOException; @@ -316,6 +319,84 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertThat(createdDatafeed.getIndices(), equalTo(datafeedConfig.getIndices())); } + public void testGetDatafeed() throws Exception { + String jobId1 = "test-get-datafeed-job-1"; + String jobId2 = "test-get-datafeed-job-2"; + Job job1 = buildJob(jobId1); + Job job2 = buildJob(jobId2); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT); + machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT); + + String datafeedId1 = jobId1 + "-feed"; + String datafeedId2 = jobId2 + "-feed"; + DatafeedConfig datafeed1 = DatafeedConfig.builder(datafeedId1, jobId1).setIndices("data_1").build(); + DatafeedConfig datafeed2 = DatafeedConfig.builder(datafeedId2, jobId2).setIndices("data_2").build(); + machineLearningClient.putDatafeed(new PutDatafeedRequest(datafeed1), RequestOptions.DEFAULT); + machineLearningClient.putDatafeed(new PutDatafeedRequest(datafeed2), RequestOptions.DEFAULT); + + // Test getting specific datafeeds + { + GetDatafeedRequest request = new GetDatafeedRequest(datafeedId1, datafeedId2); + GetDatafeedResponse response = execute(request, machineLearningClient::getDatafeed, machineLearningClient::getDatafeedAsync); + + assertEquals(2, response.count()); + assertThat(response.datafeeds(), hasSize(2)); + assertThat(response.datafeeds().stream().map(DatafeedConfig::getId).collect(Collectors.toList()), + containsInAnyOrder(datafeedId1, datafeedId2)); + } + + // Test getting a single one + { + GetDatafeedRequest request = new GetDatafeedRequest(datafeedId1); + GetDatafeedResponse response = execute(request, machineLearningClient::getDatafeed, machineLearningClient::getDatafeedAsync); + + assertTrue(response.count() == 1L); + assertThat(response.datafeeds().get(0).getId(), equalTo(datafeedId1)); + } + + // Test getting all datafeeds explicitly + { + GetDatafeedRequest request = GetDatafeedRequest.getAllDatafeedsRequest(); + GetDatafeedResponse response = execute(request, machineLearningClient::getDatafeed, machineLearningClient::getDatafeedAsync); + + assertTrue(response.count() == 2L); + assertTrue(response.datafeeds().size() == 2L); + assertThat(response.datafeeds().stream().map(DatafeedConfig::getId).collect(Collectors.toList()), + hasItems(datafeedId1, datafeedId2)); + } + + // Test getting all datafeeds implicitly + { + GetDatafeedResponse response = execute(new GetDatafeedRequest(), machineLearningClient::getDatafeed, + machineLearningClient::getDatafeedAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.datafeeds().size() >= 2L); + assertThat(response.datafeeds().stream().map(DatafeedConfig::getId).collect(Collectors.toList()), + hasItems(datafeedId1, datafeedId2)); + } + + // Test get missing pattern with allow_no_datafeeds set to true + { + GetDatafeedRequest request = new GetDatafeedRequest("missing-*"); + + 
GetDatafeedResponse response = execute(request, machineLearningClient::getDatafeed, machineLearningClient::getDatafeedAsync); + + assertThat(response.count(), equalTo(0L)); + } + + // Test get missing pattern with allow_no_datafeeds set to false + { + GetDatafeedRequest request = new GetDatafeedRequest("missing-*"); + request.setAllowNoDatafeeds(false); + + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, + () -> execute(request, machineLearningClient::getDatafeed, machineLearningClient::getDatafeedAsync)); + assertThat(e.status(), equalTo(RestStatus.NOT_FOUND)); + } + } + public void testDeleteDatafeed() throws Exception { String jobId = randomValidJobId(); Job job = buildJob(jobId); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 66a38ca781c..f0f7ffd939f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -45,6 +45,8 @@ import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetCategoriesResponse; +import org.elasticsearch.client.ml.GetDatafeedRequest; +import org.elasticsearch.client.ml.GetDatafeedResponse; import org.elasticsearch.client.ml.GetInfluencersRequest; import org.elasticsearch.client.ml.GetInfluencersResponse; import org.elasticsearch.client.ml.GetJobRequest; @@ -208,14 +210,14 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-ml-get-job-request - GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); //<1> - request.setAllowNoJobs(true); //<2> + GetJobRequest request = new GetJobRequest("get-machine-learning-job1", "get-machine-learning-job*"); // <1> + request.setAllowNoJobs(true); // <2> //end::x-pack-ml-get-job-request //tag::x-pack-ml-get-job-execute GetJobResponse response = client.machineLearning().getJob(request, RequestOptions.DEFAULT); - long numberOfJobs = response.count(); //<1> - List jobs = response.jobs(); //<2> + long numberOfJobs = response.count(); // <1> + List jobs = response.jobs(); // <2> //end::x-pack-ml-get-job-execute assertEquals(2, response.count()); @@ -266,12 +268,12 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-delete-ml-job-request DeleteJobRequest deleteJobRequest = new DeleteJobRequest("my-first-machine-learning-job"); - deleteJobRequest.setForce(false); //<1> + deleteJobRequest.setForce(false); // <1> AcknowledgedResponse deleteJobResponse = client.machineLearning().deleteJob(deleteJobRequest, RequestOptions.DEFAULT); //end::x-pack-delete-ml-job-request //tag::x-pack-delete-ml-job-response - boolean isAcknowledged = deleteJobResponse.isAcknowledged(); //<1> + boolean isAcknowledged = deleteJobResponse.isAcknowledged(); // <1> //end::x-pack-delete-ml-job-response } { @@ -313,13 +315,13 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-ml-open-job-request - OpenJobRequest openJobRequest = new OpenJobRequest("opening-my-first-machine-learning-job"); //<1> - openJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); //<2> + 
OpenJobRequest openJobRequest = new OpenJobRequest("opening-my-first-machine-learning-job"); // <1> + openJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); // <2> //end::x-pack-ml-open-job-request //tag::x-pack-ml-open-job-execute OpenJobResponse openJobResponse = client.machineLearning().openJob(openJobRequest, RequestOptions.DEFAULT); - boolean isOpened = openJobResponse.isOpened(); //<1> + boolean isOpened = openJobResponse.isOpened(); // <1> //end::x-pack-ml-open-job-execute } @@ -328,7 +330,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { ActionListener listener = new ActionListener() { @Override public void onResponse(OpenJobResponse openJobResponse) { - //<1> + // <1> } @Override @@ -343,7 +345,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::x-pack-ml-open-job-execute-async - client.machineLearning().openJobAsync(openJobRequest, RequestOptions.DEFAULT, listener); //<1> + client.machineLearning().openJobAsync(openJobRequest, RequestOptions.DEFAULT, listener); // <1> // end::x-pack-ml-open-job-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -359,15 +361,15 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); //tag::x-pack-ml-close-job-request - CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-first-machine-learning-job", "otherjobs*"); //<1> - closeJobRequest.setForce(false); //<2> - closeJobRequest.setAllowNoJobs(true); //<3> - closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); //<4> + CloseJobRequest closeJobRequest = new CloseJobRequest("closing-my-first-machine-learning-job", "otherjobs*"); // <1> + closeJobRequest.setForce(false); // <2> + closeJobRequest.setAllowNoJobs(true); // <3> + closeJobRequest.setTimeout(TimeValue.timeValueMinutes(10)); // <4> //end::x-pack-ml-close-job-request //tag::x-pack-ml-close-job-execute CloseJobResponse closeJobResponse = client.machineLearning().closeJob(closeJobRequest, RequestOptions.DEFAULT); - boolean isClosed = closeJobResponse.isClosed(); //<1> + boolean isClosed = closeJobResponse.isClosed(); // <1> //end::x-pack-ml-close-job-execute } @@ -380,7 +382,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { ActionListener listener = new ActionListener() { @Override public void onResponse(CloseJobResponse closeJobResponse) { - //<1> + // <1> } @Override @@ -396,7 +398,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::x-pack-ml-close-job-execute-async - client.machineLearning().closeJobAsync(closeJobRequest, RequestOptions.DEFAULT, listener); //<1> + client.machineLearning().closeJobAsync(closeJobRequest, RequestOptions.DEFAULT, listener); // <1> // end::x-pack-ml-close-job-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -427,37 +429,37 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { customSettings.put("custom-setting-1", "custom-value"); //tag::x-pack-ml-update-job-detector-options - JobUpdate.DetectorUpdate detectorUpdate = new JobUpdate.DetectorUpdate(0, //<1> - "detector description", //<2> - detectionRules); //<3> + JobUpdate.DetectorUpdate detectorUpdate = new JobUpdate.DetectorUpdate(0, // <1> + "detector description", // <2> + detectionRules); // <3> 
//end::x-pack-ml-update-job-detector-options //tag::x-pack-ml-update-job-options - JobUpdate update = new JobUpdate.Builder(jobId) //<1> - .setDescription("My description") //<2> - .setAnalysisLimits(new AnalysisLimits(1000L, null)) //<3> - .setBackgroundPersistInterval(TimeValue.timeValueHours(3)) //<4> - .setCategorizationFilters(Arrays.asList("categorization-filter")) //<5> - .setDetectorUpdates(Arrays.asList(detectorUpdate)) //<6> - .setGroups(Arrays.asList("job-group-1")) //<7> - .setResultsRetentionDays(10L) //<8> - .setModelPlotConfig(new ModelPlotConfig(true, null)) //<9> - .setModelSnapshotRetentionDays(7L) //<10> - .setCustomSettings(customSettings) //<11> - .setRenormalizationWindowDays(3L) //<12> + JobUpdate update = new JobUpdate.Builder(jobId) // <1> + .setDescription("My description") // <2> + .setAnalysisLimits(new AnalysisLimits(1000L, null)) // <3> + .setBackgroundPersistInterval(TimeValue.timeValueHours(3)) // <4> + .setCategorizationFilters(Arrays.asList("categorization-filter")) // <5> + .setDetectorUpdates(Arrays.asList(detectorUpdate)) // <6> + .setGroups(Arrays.asList("job-group-1")) // <7> + .setResultsRetentionDays(10L) // <8> + .setModelPlotConfig(new ModelPlotConfig(true, null)) // <9> + .setModelSnapshotRetentionDays(7L) // <10> + .setCustomSettings(customSettings) // <11> + .setRenormalizationWindowDays(3L) // <12> .build(); //end::x-pack-ml-update-job-options //tag::x-pack-ml-update-job-request - UpdateJobRequest updateJobRequest = new UpdateJobRequest(update); //<1> + UpdateJobRequest updateJobRequest = new UpdateJobRequest(update); // <1> //end::x-pack-ml-update-job-request //tag::x-pack-ml-update-job-execute PutJobResponse updateJobResponse = client.machineLearning().updateJob(updateJobRequest, RequestOptions.DEFAULT); //end::x-pack-ml-update-job-execute //tag::x-pack-ml-update-job-response - Job updatedJob = updateJobResponse.getResponse(); //<1> + Job updatedJob = updateJobResponse.getResponse(); // <1> //end::x-pack-ml-update-job-response assertEquals(update.getDescription(), updatedJob.getDescription()); @@ -467,7 +469,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { ActionListener listener = new ActionListener() { @Override public void onResponse(PutJobResponse updateJobResponse) { - //<1> + // <1> } @Override @@ -483,7 +485,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::x-pack-ml-update-job-execute-async - client.machineLearning().updateJobAsync(updateJobRequest, RequestOptions.DEFAULT, listener); //<1> + client.machineLearning().updateJobAsync(updateJobRequest, RequestOptions.DEFAULT, listener); // <1> // end::x-pack-ml-update-job-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -590,6 +592,59 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testGetDatafeed() throws Exception { + RestHighLevelClient client = highLevelClient(); + + Job job = MachineLearningIT.buildJob("get-datafeed-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + String datafeedId = job.getId() + "-feed"; + DatafeedConfig datafeed = DatafeedConfig.builder(datafeedId, job.getId()).setIndices("foo").build(); + client.machineLearning().putDatafeed(new PutDatafeedRequest(datafeed), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-get-datafeed-request + GetDatafeedRequest request = new GetDatafeedRequest(datafeedId); // <1> + 
request.setAllowNoDatafeeds(true); // <2> + //end::x-pack-ml-get-datafeed-request + + //tag::x-pack-ml-get-datafeed-execute + GetDatafeedResponse response = client.machineLearning().getDatafeed(request, RequestOptions.DEFAULT); + long numberOfDatafeeds = response.count(); // <1> + List datafeeds = response.datafeeds(); // <2> + //end::x-pack-ml-get-datafeed-execute + + assertEquals(1, numberOfDatafeeds); + assertEquals(1, datafeeds.size()); + } + { + GetDatafeedRequest request = new GetDatafeedRequest(datafeedId); + + // tag::x-pack-ml-get-datafeed-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetDatafeedResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-datafeed-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-datafeed-execute-async + client.machineLearning().getDatafeedAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-datafeed-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testDeleteDatafeed() throws Exception { RestHighLevelClient client = highLevelClient(); @@ -604,13 +659,13 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-delete-ml-datafeed-request DeleteDatafeedRequest deleteDatafeedRequest = new DeleteDatafeedRequest(datafeedId); - deleteDatafeedRequest.setForce(false); //<1> + deleteDatafeedRequest.setForce(false); // <1> AcknowledgedResponse deleteDatafeedResponse = client.machineLearning().deleteDatafeed( deleteDatafeedRequest, RequestOptions.DEFAULT); //end::x-pack-delete-ml-datafeed-request //tag::x-pack-delete-ml-datafeed-response - boolean isAcknowledged = deleteDatafeedResponse.isAcknowledged(); //<1> + boolean isAcknowledged = deleteDatafeedResponse.isAcknowledged(); // <1> //end::x-pack-delete-ml-datafeed-response } @@ -759,15 +814,15 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-ml-flush-job-request - FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-first-machine-learning-job"); //<1> + FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-first-machine-learning-job"); // <1> //end::x-pack-ml-flush-job-request //tag::x-pack-ml-flush-job-request-options - flushJobRequest.setCalcInterim(true); //<1> - flushJobRequest.setAdvanceTime("2018-08-31T16:35:07+00:00"); //<2> - flushJobRequest.setStart("2018-08-31T16:35:17+00:00"); //<3> - flushJobRequest.setEnd("2018-08-31T16:35:27+00:00"); //<4> - flushJobRequest.setSkipTime("2018-08-31T16:35:00+00:00"); //<5> + flushJobRequest.setCalcInterim(true); // <1> + flushJobRequest.setAdvanceTime("2018-08-31T16:35:07+00:00"); // <2> + flushJobRequest.setStart("2018-08-31T16:35:17+00:00"); // <3> + flushJobRequest.setEnd("2018-08-31T16:35:27+00:00"); // <4> + flushJobRequest.setSkipTime("2018-08-31T16:35:00+00:00"); // <5> //end::x-pack-ml-flush-job-request-options //tag::x-pack-ml-flush-job-execute @@ -775,8 +830,8 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { //end::x-pack-ml-flush-job-execute //tag::x-pack-ml-flush-job-response - boolean isFlushed = flushJobResponse.isFlushed(); //<1> - Date lastFinalizedBucketEnd = flushJobResponse.getLastFinalizedBucketEnd(); //<2> + boolean isFlushed = flushJobResponse.isFlushed(); 
// <1> + Date lastFinalizedBucketEnd = flushJobResponse.getLastFinalizedBucketEnd(); // <2> //end::x-pack-ml-flush-job-response } @@ -785,7 +840,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { ActionListener listener = new ActionListener() { @Override public void onResponse(FlushJobResponse FlushJobResponse) { - //<1> + // <1> } @Override @@ -801,7 +856,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::x-pack-ml-flush-job-execute-async - client.machineLearning().flushJobAsync(flushJobRequest, RequestOptions.DEFAULT, listener); //<1> + client.machineLearning().flushJobAsync(flushJobRequest, RequestOptions.DEFAULT, listener); // <1> // end::x-pack-ml-flush-job-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -839,13 +894,13 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-ml-delete-forecast-request - DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest("deleting-forecast-for-job"); //<1> + DeleteForecastRequest deleteForecastRequest = new DeleteForecastRequest("deleting-forecast-for-job"); // <1> //end::x-pack-ml-delete-forecast-request //tag::x-pack-ml-delete-forecast-request-options - deleteForecastRequest.setForecastIds(forecastId); //<1> - deleteForecastRequest.timeout("30s"); //<2> - deleteForecastRequest.setAllowNoForecasts(true); //<3> + deleteForecastRequest.setForecastIds(forecastId); // <1> + deleteForecastRequest.timeout("30s"); // <2> + deleteForecastRequest.setAllowNoForecasts(true); // <3> //end::x-pack-ml-delete-forecast-request-options //tag::x-pack-ml-delete-forecast-execute @@ -854,7 +909,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { //end::x-pack-ml-delete-forecast-execute //tag::x-pack-ml-delete-forecast-response - boolean isAcknowledged = deleteForecastResponse.isAcknowledged(); //<1> + boolean isAcknowledged = deleteForecastResponse.isAcknowledged(); // <1> //end::x-pack-ml-delete-forecast-response } { @@ -862,7 +917,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { ActionListener listener = new ActionListener() { @Override public void onResponse(AcknowledgedResponse DeleteForecastResponse) { - //<1> + // <1> } @Override @@ -879,7 +934,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::x-pack-ml-delete-forecast-execute-async - client.machineLearning().deleteForecastAsync(deleteForecastRequest, RequestOptions.DEFAULT, listener); //<1> + client.machineLearning().deleteForecastAsync(deleteForecastRequest, RequestOptions.DEFAULT, listener); // <1> // end::x-pack-ml-delete-forecast-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -897,8 +952,8 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-ml-get-job-stats-request - GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*"); //<1> - request.setAllowNoJobs(true); //<2> + GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*"); // <1> + request.setAllowNoJobs(true); // <2> //end::x-pack-ml-get-job-stats-request //tag::x-pack-ml-get-job-stats-execute @@ -906,8 +961,8 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { 
//end::x-pack-ml-get-job-stats-execute //tag::x-pack-ml-get-job-stats-response - long numberOfJobStats = response.count(); //<1> - List jobStats = response.jobStats(); //<2> + long numberOfJobStats = response.count(); // <1> + List jobStats = response.jobStats(); // <2> //end::x-pack-ml-get-job-stats-response assertEquals(2, response.count()); @@ -964,12 +1019,12 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-ml-forecast-job-request - ForecastJobRequest forecastJobRequest = new ForecastJobRequest("forecasting-my-first-machine-learning-job"); //<1> + ForecastJobRequest forecastJobRequest = new ForecastJobRequest("forecasting-my-first-machine-learning-job"); // <1> //end::x-pack-ml-forecast-job-request //tag::x-pack-ml-forecast-job-request-options - forecastJobRequest.setExpiresIn(TimeValue.timeValueHours(48)); //<1> - forecastJobRequest.setDuration(TimeValue.timeValueHours(24)); //<2> + forecastJobRequest.setExpiresIn(TimeValue.timeValueHours(48)); // <1> + forecastJobRequest.setDuration(TimeValue.timeValueHours(24)); // <2> //end::x-pack-ml-forecast-job-request-options //tag::x-pack-ml-forecast-job-execute @@ -977,8 +1032,8 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { //end::x-pack-ml-forecast-job-execute //tag::x-pack-ml-forecast-job-response - boolean isAcknowledged = forecastJobResponse.isAcknowledged(); //<1> - String forecastId = forecastJobResponse.getForecastId(); //<2> + boolean isAcknowledged = forecastJobResponse.isAcknowledged(); // <1> + String forecastId = forecastJobResponse.getForecastId(); // <2> //end::x-pack-ml-forecast-job-response assertTrue(isAcknowledged); assertNotNull(forecastId); @@ -988,7 +1043,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { ActionListener listener = new ActionListener() { @Override public void onResponse(ForecastJobResponse forecastJobResponse) { - //<1> + // <1> } @Override @@ -1004,7 +1059,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::x-pack-ml-forecast-job-execute-async - client.machineLearning().forecastJobAsync(forecastJobRequest, RequestOptions.DEFAULT, listener); //<1> + client.machineLearning().forecastJobAsync(forecastJobRequest, RequestOptions.DEFAULT, listener); // <1> // end::x-pack-ml-forecast-job-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1211,18 +1266,18 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { { //tag::x-pack-ml-post-data-request - PostDataRequest.JsonBuilder jsonBuilder = new PostDataRequest.JsonBuilder(); //<1> + PostDataRequest.JsonBuilder jsonBuilder = new PostDataRequest.JsonBuilder(); // <1> Map mapData = new HashMap<>(); mapData.put("total", 109); - jsonBuilder.addDoc(mapData); //<2> - jsonBuilder.addDoc("{\"total\":1000}"); //<3> - PostDataRequest postDataRequest = new PostDataRequest("test-post-data", jsonBuilder); //<4> + jsonBuilder.addDoc(mapData); // <2> + jsonBuilder.addDoc("{\"total\":1000}"); // <3> + PostDataRequest postDataRequest = new PostDataRequest("test-post-data", jsonBuilder); // <4> //end::x-pack-ml-post-data-request //tag::x-pack-ml-post-data-request-options - postDataRequest.setResetStart("2018-08-31T16:35:07+00:00"); //<1> - postDataRequest.setResetEnd("2018-08-31T16:35:17+00:00"); //<2> + postDataRequest.setResetStart("2018-08-31T16:35:07+00:00"); // <1> + postDataRequest.setResetEnd("2018-08-31T16:35:17+00:00"); // <2> 
//end::x-pack-ml-post-data-request-options postDataRequest.setResetEnd(null); postDataRequest.setResetStart(null); @@ -1232,7 +1287,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { //end::x-pack-ml-post-data-execute //tag::x-pack-ml-post-data-response - DataCounts dataCounts = postDataResponse.getDataCounts(); //<1> + DataCounts dataCounts = postDataResponse.getDataCounts(); // <1> //end::x-pack-ml-post-data-response assertEquals(2, dataCounts.getInputRecordCount()); @@ -1242,7 +1297,7 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { ActionListener listener = new ActionListener() { @Override public void onResponse(PostDataResponse postDataResponse) { - //<1> + // <1> } @Override @@ -1255,14 +1310,14 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { Map mapData = new HashMap<>(); mapData.put("total", 109); jsonBuilder.addDoc(mapData); - PostDataRequest postDataRequest = new PostDataRequest("test-post-data", jsonBuilder); //<1> + PostDataRequest postDataRequest = new PostDataRequest("test-post-data", jsonBuilder); // <1> // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); // tag::x-pack-ml-post-data-execute-async - client.machineLearning().postDataAsync(postDataRequest, RequestOptions.DEFAULT, listener); //<1> + client.machineLearning().postDataAsync(postDataRequest, RequestOptions.DEFAULT, listener); // <1> // end::x-pack-ml-post-data-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetDatafeedRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetDatafeedRequestTests.java new file mode 100644 index 00000000000..cca63d2f29e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetDatafeedRequestTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetDatafeedRequestTests extends AbstractXContentTestCase { + + public void testAllDatafeedRequest() { + GetDatafeedRequest request = GetDatafeedRequest.getAllDatafeedsRequest(); + + assertEquals(request.getDatafeedIds().size(), 1); + assertEquals(request.getDatafeedIds().get(0), "_all"); + } + + public void testNewWithDatafeedId() { + Exception exception = expectThrows(NullPointerException.class, () -> new GetDatafeedRequest("feed",null)); + assertEquals(exception.getMessage(), "datafeedIds must not contain null values"); + } + + @Override + protected GetDatafeedRequest createTestInstance() { + int count = randomIntBetween(0, 10); + List datafeedIds = new ArrayList<>(count); + + for (int i = 0; i < count; i++) { + datafeedIds.add(DatafeedConfigTests.randomValidDatafeedId()); + } + + GetDatafeedRequest request = new GetDatafeedRequest(datafeedIds); + + if (randomBoolean()) { + request.setAllowNoDatafeeds(randomBoolean()); + } + + return request; + } + + @Override + protected GetDatafeedRequest doParseInstance(XContentParser parser) throws IOException { + return GetDatafeedRequest.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetDatafeedResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetDatafeedResponseTests.java new file mode 100644 index 00000000000..e4c93c9d8ac --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetDatafeedResponseTests.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.datafeed.DatafeedConfig;
+import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Predicate;
+
+public class GetDatafeedResponseTests extends AbstractXContentTestCase<GetDatafeedResponse> {
+
+    @Override
+    protected GetDatafeedResponse createTestInstance() {
+        int count = randomIntBetween(1, 5);
+        List<DatafeedConfig.Builder> results = new ArrayList<>(count);
+        for (int i = 0; i < count; i++) {
+            results.add(DatafeedConfigTests.createRandomBuilder());
+        }
+        return new GetDatafeedResponse(results, count);
+    }
+
+    @Override
+    protected GetDatafeedResponse doParseInstance(XContentParser parser) throws IOException {
+        return GetDatafeedResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java
index 77b2109dd7c..36aa02dbd62 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobRequestTests.java
@@ -64,6 +64,6 @@ public class GetJobRequestTests extends AbstractXContentTestCase<GetJobRequest>
 
     @Override
     protected boolean supportsUnknownFields() {
-        return true;
+        return false;
     }
 }
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
index 3a7910ad732..7f92d1690f9 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/datafeed/DatafeedConfigTests.java
@@ -44,6 +44,10 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig>
     private static List<String> randomStringList(int min, int max) {
diff --git a/docs/java-rest/high-level/ml/get-datafeed.asciidoc b/docs/java-rest/high-level/ml/get-datafeed.asciidoc
new file mode 100644
index 00000000000..8e5f0664c61
--- /dev/null
+++ b/docs/java-rest/high-level/ml/get-datafeed.asciidoc
@@ -0,0 +1,56 @@
+[[java-rest-high-x-pack-ml-get-datafeed]]
+=== Get Datafeed API
+
+The Get Datafeed API provides the ability to get {ml} datafeeds in the cluster.
+It accepts a `GetDatafeedRequest` object and responds
+with a `GetDatafeedResponse` object.
+
+[[java-rest-high-x-pack-ml-get-datafeed-request]]
+==== Get Datafeed Request
+
+A `GetDatafeedRequest` object can have any number of `datafeedId` entries.
+However, they must all be non-null. An empty list is the same as requesting all datafeeds.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-datafeed-request]
+--------------------------------------------------
+<1> Constructing a new request referencing existing `datafeedIds`; these can contain wildcards
+<2> Whether to ignore if a wildcard expression matches no datafeeds.
+    (This includes the `_all` string or when no datafeeds have been specified.)
+
+[[java-rest-high-x-pack-ml-get-datafeed-execution]]
+==== Execution
+
+The request can be executed through the `MachineLearningClient` contained
+in the `RestHighLevelClient` object, accessed via the `machineLearning()` method.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-datafeed-execute]
+--------------------------------------------------
+<1> The count of retrieved datafeeds
+<2> The retrieved datafeeds
+
+[[java-rest-high-x-pack-ml-get-datafeed-execution-async]]
+==== Asynchronous Execution
+
+The request can also be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-datafeed-execute-async]
+--------------------------------------------------
+<1> The `GetDatafeedRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The method does not block and returns immediately. The passed `ActionListener` is used
+to notify the caller of completion. A typical `ActionListener` for `GetDatafeedResponse` may
+look like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-datafeed-listener]
+--------------------------------------------------
+<1> `onResponse` is called back when the action is completed successfully
+<2> `onFailure` is called back when some unexpected error occurs
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 089fe26cede..3619bc9e788 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -221,6 +221,7 @@ The Java High Level REST Client supports the following Machine Learning APIs:
 * <>
 * <>
 * <>
+* <<java-rest-high-x-pack-ml-get-datafeed>>
 * <>
 * <>
 * <>
@@ -240,6 +241,7 @@ include::ml/close-job.asciidoc[]
 include::ml/update-job.asciidoc[]
 include::ml/flush-job.asciidoc[]
 include::ml/put-datafeed.asciidoc[]
+include::ml/get-datafeed.asciidoc[]
 include::ml/delete-datafeed.asciidoc[]
 include::ml/get-job-stats.asciidoc[]
 include::ml/forecast-job.asciidoc[]

From 069605bd91ffb6c4ea62c65de05cd294ff402f10 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Sun, 16 Sep 2018 07:32:12 -0400
Subject: [PATCH 05/27] Do not count shard changes tasks against REST tests
 (#33738)

After global checkpoint polling goes in, it is expected that shard
changes tasks can still be pending at the end of a CCR REST test. One
way to deal with this is to set a low timeout on these polls, but that
would mean we are not executing our REST tests with our default
production settings and would instead be using an unrealistically low
timeout. Alternatively, since we expect these tasks to be there, we can
simply not count them against the test. That is what this commit does.
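As a rough, self-contained sketch of the counting rule this commit introduces (the helper class and the task-name strings here are illustrative assumptions, not the helper the patch adds), a task counts as outstanding only if it is neither the list-tasks action itself nor matched by the expected-task predicate:

import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Illustrative sketch only: the action names below are assumptions for the
// example, not assertions about the cluster's actual task names.
public final class PendingTaskFilterSketch {

    static final String LIST_TASKS_ACTION = "cluster:monitor/tasks/lists"; // assumed name

    static List<String> outstandingTasks(List<String> taskNames, Predicate<String> expectedTask) {
        return taskNames.stream()
                .filter(name -> name.startsWith(LIST_TASKS_ACTION) == false) // never count the listing itself
                .filter(name -> expectedTask.test(name) == false)            // drop tasks expected to linger
                .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> names = List.of(
                "cluster:monitor/tasks/lists",
                "indices:data/read/xpack/ccr/shard_changes"); // an expected long poll (assumed name)
        Predicate<String> expected = name -> name.startsWith("indices:data/read/xpack/ccr/shard_changes");
        // prints [] because every task is either the listing or expected
        System.out.println(outstandingTasks(names, expected));
    }
}

With long polls expected to linger, the predicate simply matches their action name, so the assertion only trips on genuinely unexpected tasks.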
--- .../elasticsearch/xpack/ccr/CcrRestIT.java | 3 +- .../xpack/test/rest/XPackRestTestHelper.java | 51 ++++++++++++++----- 2 files changed, 39 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/ccr/qa/rest/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java b/x-pack/plugin/ccr/qa/rest/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java index 45998433d33..b0225fc5bf9 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java +++ b/x-pack/plugin/ccr/qa/rest/src/test/java/org/elasticsearch/xpack/ccr/CcrRestIT.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.elasticsearch.xpack.ccr.action.ShardChangesAction; import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; import org.junit.After; @@ -36,7 +37,7 @@ public class CcrRestIT extends ESClientYamlSuiteTestCase { @After public void cleanup() throws Exception { - XPackRestTestHelper.waitForPendingTasks(adminClient()); + XPackRestTestHelper.waitForPendingTasks(adminClient(), taskName -> taskName.startsWith(ShardChangesAction.NAME)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java index 5e9fd4a386b..da2002fd4b5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/test/rest/XPackRestTestHelper.java @@ -14,6 +14,7 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; @@ -29,7 +30,9 @@ import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Predicate; +import static org.elasticsearch.test.ESTestCase.assertBusy; import static org.junit.Assert.assertEquals; public final class XPackRestTestHelper { @@ -84,34 +87,54 @@ public final class XPackRestTestHelper { } /** - * Waits for pending tasks to complete + * Wait for outstanding tasks to complete. The specified admin client is used to check the outstanding tasks and this is done using + * {@link ESTestCase#assertBusy(CheckedRunnable)} to give a chance to any outstanding tasks to complete. + * + * @param adminClient the admin client + * @throws Exception if an exception is thrown while checking the outstanding tasks */ - public static void waitForPendingTasks(RestClient adminClient) throws Exception { - ESTestCase.assertBusy(() -> { + public static void waitForPendingTasks(final RestClient adminClient) throws Exception { + waitForPendingTasks(adminClient, taskName -> false); + } + + /** + * Wait for outstanding tasks to complete. The specified admin client is used to check the outstanding tasks and this is done using + * {@link ESTestCase#assertBusy(CheckedRunnable)} to give a chance to any outstanding tasks to complete. The specified filter is used + * to filter out outstanding tasks that are expected to be there. 
+     *
+     * @param adminClient the admin client
+     * @param taskFilter  predicate used to filter tasks that are expected to be there
+     * @throws Exception if an exception is thrown while checking the outstanding tasks
+     */
+    public static void waitForPendingTasks(final RestClient adminClient, final Predicate<String> taskFilter) throws Exception {
+        assertBusy(() -> {
             try {
-                Request request = new Request("GET", "/_cat/tasks");
+                final Request request = new Request("GET", "/_cat/tasks");
                 request.addParameter("detailed", "true");
-                Response response = adminClient.performRequest(request);
-                // Check to see if there are tasks still active. We exclude the
-                // list tasks
-                // actions tasks from this otherwise we will always fail
+                final Response response = adminClient.performRequest(request);
+                /*
+                 * Check to see if there are outstanding tasks; we exclude the list task itself, and any expected outstanding tasks
+                 * using the specified task filter.
+                 */
                 if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
                     try (BufferedReader responseReader = new BufferedReader(
                             new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) {
                         int activeTasks = 0;
                         String line;
-                        StringBuilder tasksListString = new StringBuilder();
+                        final StringBuilder tasksListString = new StringBuilder();
                         while ((line = responseReader.readLine()) != null) {
-                            if (line.startsWith(ListTasksAction.NAME) == false) {
-                                activeTasks++;
-                                tasksListString.append(line);
-                                tasksListString.append('\n');
+                            final String taskName = line.split("\\s+")[0];
+                            if (taskName.startsWith(ListTasksAction.NAME) || taskFilter.test(taskName)) {
+                                continue;
                             }
+                            activeTasks++;
+                            tasksListString.append(line);
+                            tasksListString.append('\n');
                         }
                         assertEquals(activeTasks + " active tasks found:\n" + tasksListString, 0, activeTasks);
                     }
                 }
-            } catch (IOException e) {
+            } catch (final IOException e) {
                 throw new AssertionError("Error getting active tasks list", e);
             }
         });

From 770ad539780ad8edf24283b4996ba3661ad13c92 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Sun, 16 Sep 2018 10:35:23 -0400
Subject: [PATCH 06/27] Introduce long polling for changes (#33683)

Rather than scheduling pings to the leader index when we are caught up
to the leader, this commit introduces long polling for changes. We fire
off a request to the leader which, if we are already caught up, will
enter a poll on the leader side to listen for global checkpoint
changes. These polls will time out after a default of one minute, but
the timeout can also be specified when creating the following task. We
use these timeouts as a way to keep statistics up to date, to not
exaggerate the time since the last fetch, and to avoid broken pipes.
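Conceptually, the leader-side behavior is a bounded wait on the global checkpoint. The following is a simplified, self-contained sketch of that long-poll pattern (plain wait/notify stands in for the shard-level global checkpoint listener; none of these names come from the actual code):

import java.util.concurrent.TimeUnit;

// Simplified sketch: if the requested sequence number is not yet visible, the
// "leader" parks the request until the checkpoint advances or the poll times
// out; a timeout yields the current (unchanged) checkpoint, i.e. an empty
// response that lets the follower refresh its statistics and poll again.
public final class LongPollSketch {

    private long globalCheckpoint = -1;

    // Wait up to pollTimeoutMillis for the global checkpoint to reach fromSeqNo.
    synchronized long poll(long fromSeqNo, long pollTimeoutMillis) throws InterruptedException {
        final long deadline = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(pollTimeoutMillis);
        while (globalCheckpoint < fromSeqNo) {
            final long remainingMillis = TimeUnit.NANOSECONDS.toMillis(deadline - System.nanoTime());
            if (remainingMillis <= 0) {
                return globalCheckpoint; // timed out: reply with no new operations
            }
            wait(remainingMillis);
        }
        return globalCheckpoint; // advanced: reply with the operations now available
    }

    synchronized void advanceTo(long checkpoint) {
        globalCheckpoint = Math.max(globalCheckpoint, checkpoint);
        notifyAll(); // wake any parked polls
    }

    public static void main(String[] args) throws InterruptedException {
        LongPollSketch leader = new LongPollSketch();
        new Thread(() -> leader.advanceTo(41)).start(); // a write advances the checkpoint
        System.out.println(leader.poll(41, 1000)); // returns once the checkpoint reaches 41
        System.out.println(leader.poll(42, 200));  // times out and returns 41
    }
}

The timeout path is what keeps the follower's statistics honest: even an idle shard produces a response once per poll interval instead of leaving the connection silent indefinitely.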
--- .../xpack/ccr/action/ShardChangesAction.java | 116 ++++++++++++++++-- .../xpack/ccr/action/ShardFollowNodeTask.java | 30 ++--- .../xpack/ccr/action/ShardFollowTask.java | 48 ++++---- .../ccr/action/ShardFollowTasksExecutor.java | 1 + .../action/TransportFollowIndexAction.java | 2 +- .../xpack/ccr/action/AutoFollowTests.java | 2 +- .../ccr/action/ShardChangesActionTests.java | 27 +++- .../ccr/action/ShardFollowNodeTaskTests.java | 94 ++++++++++---- .../ShardFollowTaskReplicationTests.java | 7 ++ .../core/ccr/action/FollowIndexAction.java | 30 ++--- 10 files changed, 258 insertions(+), 99 deletions(-) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index eef3671d516..bf2bbd5af8a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -5,7 +5,9 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilters; @@ -19,6 +21,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.seqno.SeqNoStats; import org.elasticsearch.index.shard.IndexShard; @@ -36,8 +39,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; +import java.util.concurrent.TimeoutException; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; public class ShardChangesAction extends Action { @@ -59,6 +64,7 @@ public class ShardChangesAction extends Action { private int maxOperationCount; private ShardId shardId; private String expectedHistoryUUID; + private TimeValue pollTimeout = FollowIndexAction.DEFAULT_POLL_TIMEOUT; private long maxOperationSizeInBytes = FollowIndexAction.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; public Request(ShardId shardId, String expectedHistoryUUID) { @@ -102,6 +108,14 @@ public class ShardChangesAction extends Action { return expectedHistoryUUID; } + public TimeValue getPollTimeout() { + return pollTimeout; + } + + public void setPollTimeout(final TimeValue pollTimeout) { + this.pollTimeout = Objects.requireNonNull(pollTimeout, "pollTimeout"); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -126,6 +140,7 @@ public class ShardChangesAction extends Action { maxOperationCount = in.readVInt(); shardId = ShardId.readShardId(in); expectedHistoryUUID = in.readString(); + pollTimeout = in.readTimeValue(); maxOperationSizeInBytes = in.readVLong(); } @@ -136,6 +151,7 @@ public class ShardChangesAction extends Action { out.writeVInt(maxOperationCount); shardId.writeTo(out); out.writeString(expectedHistoryUUID); + out.writeTimeValue(pollTimeout); out.writeVLong(maxOperationSizeInBytes); } @@ -149,12 +165,13 @@ 
public class ShardChangesAction extends Action { maxOperationCount == request.maxOperationCount && Objects.equals(shardId, request.shardId) && Objects.equals(expectedHistoryUUID, request.expectedHistoryUUID) && + Objects.equals(pollTimeout, request.pollTimeout) && maxOperationSizeInBytes == request.maxOperationSizeInBytes; } @Override public int hashCode() { - return Objects.hash(fromSeqNo, maxOperationCount, shardId, expectedHistoryUUID, maxOperationSizeInBytes); + return Objects.hash(fromSeqNo, maxOperationCount, shardId, expectedHistoryUUID, pollTimeout, maxOperationSizeInBytes); } @Override @@ -164,6 +181,7 @@ public class ShardChangesAction extends Action { ", maxOperationCount=" + maxOperationCount + ", shardId=" + shardId + ", expectedHistoryUUID=" + expectedHistoryUUID + + ", pollTimeout=" + pollTimeout + ", maxOperationSizeInBytes=" + maxOperationSizeInBytes + '}'; } @@ -265,19 +283,90 @@ public class ShardChangesAction extends Action { @Override protected Response shardOperation(Request request, ShardId shardId) throws IOException { - IndexService indexService = indicesService.indexServiceSafe(request.getShard().getIndex()); - IndexShard indexShard = indexService.getShard(request.getShard().id()); - final SeqNoStats seqNoStats = indexShard.seqNoStats(); + final IndexService indexService = indicesService.indexServiceSafe(request.getShard().getIndex()); + final IndexShard indexShard = indexService.getShard(request.getShard().id()); + final SeqNoStats seqNoStats = indexShard.seqNoStats(); final long mappingVersion = clusterService.state().metaData().index(shardId.getIndex()).getMappingVersion(); final Translog.Operation[] operations = getOperations( indexShard, seqNoStats.getGlobalCheckpoint(), - request.fromSeqNo, - request.maxOperationCount, - request.expectedHistoryUUID, - request.maxOperationSizeInBytes); - return new Response(mappingVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), operations); + request.getFromSeqNo(), + request.getMaxOperationCount(), + request.getExpectedHistoryUUID(), + request.getMaxOperationSizeInBytes()); + return getResponse(mappingVersion, seqNoStats, operations); + } + + @Override + protected void asyncShardOperation( + final Request request, + final ShardId shardId, + final ActionListener listener) throws IOException { + final IndexService indexService = indicesService.indexServiceSafe(request.getShard().getIndex()); + final IndexShard indexShard = indexService.getShard(request.getShard().id()); + final SeqNoStats seqNoStats = indexShard.seqNoStats(); + + if (request.getFromSeqNo() > seqNoStats.getGlobalCheckpoint()) { + logger.trace( + "{} waiting for global checkpoint advancement from [{}] to [{}]", + shardId, + seqNoStats.getGlobalCheckpoint(), + request.getFromSeqNo()); + indexShard.addGlobalCheckpointListener( + request.getFromSeqNo(), + (g, e) -> { + if (g != UNASSIGNED_SEQ_NO) { + assert request.getFromSeqNo() <= g + : shardId + " only advanced to [" + g + "] while waiting for [" + request.getFromSeqNo() + "]"; + globalCheckpointAdvanced(shardId, g, request, listener); + } else { + assert e != null; + globalCheckpointAdvancementFailure(shardId, e, request, listener, indexShard); + } + }, + request.getPollTimeout()); + } else { + super.asyncShardOperation(request, shardId, listener); + } + } + + private void globalCheckpointAdvanced( + final ShardId shardId, + final long globalCheckpoint, + final Request request, + final ActionListener listener) { + logger.trace("{} global checkpoint advanced to [{}] after waiting for [{}]", 
shardId, globalCheckpoint, request.getFromSeqNo()); + try { + super.asyncShardOperation(request, shardId, listener); + } catch (final IOException caught) { + listener.onFailure(caught); + } + } + + private void globalCheckpointAdvancementFailure( + final ShardId shardId, + final Exception e, + final Request request, + final ActionListener listener, + final IndexShard indexShard) { + logger.trace( + () -> new ParameterizedMessage( + "{} exception waiting for global checkpoint advancement to [{}]", shardId, request.getFromSeqNo()), + e); + if (e instanceof TimeoutException) { + try { + final long mappingVersion = + clusterService.state().metaData().index(shardId.getIndex()).getMappingVersion(); + final SeqNoStats latestSeqNoStats = indexShard.seqNoStats(); + listener.onResponse(getResponse(mappingVersion, latestSeqNoStats, EMPTY_OPERATIONS_ARRAY)); + } catch (final Exception caught) { + caught.addSuppressed(e); + listener.onFailure(caught); + } + } else { + listener.onFailure(e); + } } @Override @@ -300,7 +389,7 @@ public class ShardChangesAction extends Action { } - private static final Translog.Operation[] EMPTY_OPERATIONS_ARRAY = new Translog.Operation[0]; + static final Translog.Operation[] EMPTY_OPERATIONS_ARRAY = new Translog.Operation[0]; /** * Returns at most maxOperationCount operations from the specified from sequence number. @@ -324,7 +413,8 @@ public class ShardChangesAction extends Action { historyUUID + "]"); } if (fromSeqNo > globalCheckpoint) { - return EMPTY_OPERATIONS_ARRAY; + throw new IllegalStateException( + "not exposing operations from [" + fromSeqNo + "] greater than the global checkpoint [" + globalCheckpoint + "]"); } int seenBytes = 0; // - 1 is needed, because toSeqNo is inclusive @@ -344,4 +434,8 @@ public class ShardChangesAction extends Action { return operations.toArray(EMPTY_OPERATIONS_ARRAY); } + static Response getResponse(final long mappingVersion, final SeqNoStats seqNoStats, final Translog.Operation[] operations) { + return new Response(mappingVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), operations); + } + } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java index 7faebfdc26c..6bf880661fc 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -50,8 +50,8 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { private final String leaderIndex; private final ShardFollowTask params; + private final TimeValue pollTimeout; private final TimeValue maxRetryDelay; - private final TimeValue idleShardChangesRequestDelay; private final BiConsumer scheduler; private final LongSupplier relativeTimeProvider; @@ -82,8 +82,8 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { this.params = params; this.scheduler = scheduler; this.relativeTimeProvider = relativeTimeProvider; + this.pollTimeout = params.getPollTimeout(); this.maxRetryDelay = params.getMaxRetryDelay(); - this.idleShardChangesRequestDelay = params.getIdleShardRetryDelay(); /* * We keep track of the most recent fetch exceptions, with the number of exceptions that we track equal to the maximum number of * concurrent fetches. 
For each failed fetch, we track the from sequence number associated with the request, and we clear the entry @@ -229,12 +229,16 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { } innerSendShardChangesRequest(from, maxOperationCount, response -> { - synchronized (ShardFollowNodeTask.this) { - totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); - numberOfSuccessfulFetches++; - fetchExceptions.remove(from); - operationsReceived += response.getOperations().length; - totalTransferredBytes += Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::estimateSize).sum(); + if (response.getOperations().length > 0) { + // do not count polls against fetch stats + synchronized (ShardFollowNodeTask.this) { + totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + numberOfSuccessfulFetches++; + fetchExceptions.remove(from); + operationsReceived += response.getOperations().length; + totalTransferredBytes += + Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::estimateSize).sum(); + } } handleReadResponse(from, maxRequiredSeqNo, response); }, @@ -286,15 +290,7 @@ public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { } else { // read is completed, decrement numConcurrentReads--; - if (response.getOperations().length == 0 && leaderGlobalCheckpoint == lastRequestedSeqNo) { - // we got nothing and we have no reason to believe asking again well get us more, treat shard as idle and delay - // future requests - LOGGER.trace("{} received no ops and no known ops to fetch, scheduling to coordinate reads", - params.getFollowShardId()); - scheduler.accept(idleShardChangesRequestDelay, this::coordinateReads); - } else { - coordinateReads(); - } + coordinateReads(); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index 62894b0ed99..2a01f72ca77 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -49,7 +49,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); - public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + public static final ParseField POLL_TIMEOUT = new ParseField("poll_timeout"); public static final ParseField RECORDED_HISTORY_UUID = new ParseField("recorded_history_uuid"); @SuppressWarnings("unchecked") @@ -75,8 +75,8 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_RETRY_DELAY.getPreferredName()), MAX_RETRY_DELAY, ObjectParser.ValueType.STRING); PARSER.declareField(ConstructingObjectParser.constructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), - IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + (p, c) -> TimeValue.parseTimeValue(p.text(), POLL_TIMEOUT.getPreferredName()), + POLL_TIMEOUT, ObjectParser.ValueType.STRING); 
PARSER.declareString(ConstructingObjectParser.constructorArg(), RECORDED_HISTORY_UUID); PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); } @@ -90,23 +90,23 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { private final int maxConcurrentWriteBatches; private final int maxWriteBufferSize; private final TimeValue maxRetryDelay; - private final TimeValue idleShardRetryDelay; + private final TimeValue pollTimeout; private final String recordedLeaderIndexHistoryUUID; private final Map headers; ShardFollowTask( - String leaderClusterAlias, - ShardId followShardId, - ShardId leaderShardId, - int maxBatchOperationCount, - int maxConcurrentReadBatches, - long maxBatchSizeInBytes, - int maxConcurrentWriteBatches, - int maxWriteBufferSize, - TimeValue maxRetryDelay, - TimeValue idleShardRetryDelay, - String recordedLeaderIndexHistoryUUID, - Map headers) { + final String leaderClusterAlias, + final ShardId followShardId, + final ShardId leaderShardId, + final int maxBatchOperationCount, + final int maxConcurrentReadBatches, + final long maxBatchSizeInBytes, + final int maxConcurrentWriteBatches, + final int maxWriteBufferSize, + final TimeValue maxRetryDelay, + final TimeValue pollTimeout, + final String recordedLeaderIndexHistoryUUID, + final Map headers) { this.leaderClusterAlias = leaderClusterAlias; this.followShardId = followShardId; this.leaderShardId = leaderShardId; @@ -116,7 +116,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; this.maxWriteBufferSize = maxWriteBufferSize; this.maxRetryDelay = maxRetryDelay; - this.idleShardRetryDelay = idleShardRetryDelay; + this.pollTimeout = pollTimeout; this.recordedLeaderIndexHistoryUUID = recordedLeaderIndexHistoryUUID; this.headers = headers != null ? 
Collections.unmodifiableMap(headers) : Collections.emptyMap(); } @@ -131,7 +131,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { this.maxConcurrentWriteBatches = in.readVInt(); this.maxWriteBufferSize = in.readVInt(); this.maxRetryDelay = in.readTimeValue(); - this.idleShardRetryDelay = in.readTimeValue(); + this.pollTimeout = in.readTimeValue(); this.recordedLeaderIndexHistoryUUID = in.readString(); this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); } @@ -172,8 +172,8 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { return maxRetryDelay; } - public TimeValue getIdleShardRetryDelay() { - return idleShardRetryDelay; + public TimeValue getPollTimeout() { + return pollTimeout; } public String getTaskId() { @@ -204,7 +204,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { out.writeVInt(maxConcurrentWriteBatches); out.writeVInt(maxWriteBufferSize); out.writeTimeValue(maxRetryDelay); - out.writeTimeValue(idleShardRetryDelay); + out.writeTimeValue(pollTimeout); out.writeString(recordedLeaderIndexHistoryUUID); out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } @@ -231,7 +231,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); builder.field(MAX_RETRY_DELAY.getPreferredName(), maxRetryDelay.getStringRep()); - builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + builder.field(POLL_TIMEOUT.getPreferredName(), pollTimeout.getStringRep()); builder.field(RECORDED_HISTORY_UUID.getPreferredName(), recordedLeaderIndexHistoryUUID); builder.field(HEADERS.getPreferredName(), headers); return builder.endObject(); @@ -251,7 +251,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { maxBatchSizeInBytes == that.maxBatchSizeInBytes && maxWriteBufferSize == that.maxWriteBufferSize && Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay) && + Objects.equals(pollTimeout, that.pollTimeout) && Objects.equals(recordedLeaderIndexHistoryUUID, that.recordedLeaderIndexHistoryUUID) && Objects.equals(headers, that.headers); } @@ -268,7 +268,7 @@ public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { maxBatchSizeInBytes, maxWriteBufferSize, maxRetryDelay, - idleShardRetryDelay, + pollTimeout, recordedLeaderIndexHistoryUUID, headers ); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 86556480567..8e1c1a27a36 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -148,6 +148,7 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor ShardChangesAction.getOperations( + indexShard, + indexShard.getGlobalCheckpoint(), + numWrites, + numWrites + 1, + indexShard.getHistoryUUID(), + Long.MAX_VALUE)); + final String message = String.format( + Locale.ROOT, + "not exposing operations from [%d] greater than the global checkpoint [%d]", + numWrites, + 
indexShard.getGlobalCheckpoint()); + assertThat(e, hasToString(containsString(message))); + } // get operations for a range some operations do not exist: - operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), numWrites - 10, numWrites + 10, indexShard.getHistoryUUID(), Long.MAX_VALUE); assertThat(operations.length, equalTo(10)); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java index 71a97bf8207..ea4a1c12b45 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.Queue; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -58,6 +59,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { private Queue leaderGlobalCheckpoints; private Queue followerGlobalCheckpoints; private Queue maxSeqNos; + private Queue responseSizes; public void testCoordinateReads() { ShardFollowNodeTask task = createShardFollowTask(8, between(8, 20), between(1, 20), Integer.MAX_VALUE, Long.MAX_VALUE); @@ -226,6 +228,69 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } + public void testReceiveTimeout() { + final ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + final int numberOfTimeouts = randomIntBetween(1, 32); + for (int i = 0; i < numberOfTimeouts; i++) { + mappingVersions.add(1L); + leaderGlobalCheckpoints.add(63L); + maxSeqNos.add(63L); + responseSizes.add(0); + } + + final AtomicInteger counter = new AtomicInteger(); + beforeSendShardChangesRequest = status -> { + if (counter.get() <= numberOfTimeouts) { + assertThat(status.numberOfSuccessfulFetches(), equalTo(0L)); + assertThat(status.totalFetchTimeMillis(), equalTo(0L)); + assertThat(status.operationsReceived(), equalTo(0L)); + assertThat(status.totalTransferredBytes(), equalTo(0L)); + + assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + assertThat(status.totalFetchTimeMillis(), equalTo(0L)); + assertThat(status.numberOfFailedFetches(), equalTo(0L)); + } else { + // otherwise we will keep looping as if we were repeatedly polling and timing out + simulateResponse.set(false); + } + counter.incrementAndGet(); + }; + + mappingVersions.add(1L); + mappingVersions.add(1L); + leaderGlobalCheckpoints.add(63L); + maxSeqNos.add(63L); + simulateResponse.set(true); + + task.coordinateReads(); + + // one request for each request that we simulate timedout, plus our request that receives a reply, and then a follow-up request + assertThat(shardChangesRequests, hasSize(1 + 1 + numberOfTimeouts)); + for (final long[] shardChangesRequest : shardChangesRequests.subList(0, shardChangesRequests.size() - 2)) { + assertNotNull(shardChangesRequest); + assertThat(shardChangesRequest.length, equalTo(2)); + assertThat(shardChangesRequest[0], equalTo(0L)); + assertThat(shardChangesRequest[1], equalTo(64L)); + } + 
final long[] lastShardChangesRequest = shardChangesRequests.get(shardChangesRequests.size() - 1); + assertNotNull(lastShardChangesRequest); + assertThat(lastShardChangesRequest.length, equalTo(2)); + assertThat(lastShardChangesRequest[0], equalTo(64L)); + assertThat(lastShardChangesRequest[1], equalTo(64L)); + + final ShardFollowNodeTaskStatus status = task.getStatus(); + assertThat(status.numberOfSuccessfulFetches(), equalTo(1L)); + assertThat(status.numberOfFailedFetches(), equalTo(0L)); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + assertThat(status.leaderMaxSeqNo(), equalTo(63L)); + + assertThat(counter.get(), equalTo(1 + 1 + numberOfTimeouts)); + } + public void testReceiveNonRetryableError() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); @@ -357,29 +422,6 @@ public class ShardFollowNodeTaskTests extends ESTestCase { assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); } - public void testDelayCoordinatesRead() { - int[] counter = new int[]{0}; - scheduler = (delay, task) -> { - counter[0]++; - task.run(); - }; - ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); - startTask(task, 63, -1); - - task.coordinateReads(); - assertThat(shardChangesRequests.size(), equalTo(1)); - assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); - assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); - - shardChangesRequests.clear(); - ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); - // Also invokes coordinateReads() - task.innerHandleReadResponse(0L, 63L, response); - task.innerHandleReadResponse(64L, 63L, - new ShardChangesAction.Response(0, 63L, 63L, new Translog.Operation[0])); - assertThat(counter[0], equalTo(1)); - } - public void testMappingUpdate() { ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); startTask(task, 63, -1); @@ -653,6 +695,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase { leaderGlobalCheckpoints = new LinkedList<>(); followerGlobalCheckpoints = new LinkedList<>(); maxSeqNos = new LinkedList<>(); + responseSizes = new LinkedList<>(); return new ShardFollowNodeTask( 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { @@ -699,8 +742,9 @@ public class ShardFollowNodeTaskTests extends ESTestCase { if (readFailure != null) { errorHandler.accept(readFailure); } else if (simulateResponse.get()) { - final Translog.Operation[] operations = new Translog.Operation[requestBatchSize]; - for (int i = 0; i < requestBatchSize; i++) { + final int responseSize = responseSizes.size() == 0 ? 
requestBatchSize : responseSizes.poll(); + final Translog.Operation[] operations = new Translog.Operation[responseSize]; + for (int i = 0; i < responseSize; i++) { operations[i] = new Translog.NoOp(from + i, 0, "test"); } final ShardChangesAction.Response response = new ShardChangesAction.Response( diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 9b04390a3a7..0bb263d3c44 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -160,6 +160,9 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest recoverShardFromStore(leaderGroup.getPrimary()); String newHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); + // force the global checkpoint on the leader to advance + leaderGroup.appendDocs(64); + assertBusy(() -> { assertThat(shardFollowTask.isStopped(), is(true)); assertThat(shardFollowTask.getFailure().getMessage(), equalTo("unexpected history uuid, expected [" + oldHistoryUUID + @@ -259,6 +262,10 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest for (IndexShard indexShard : indexShards) { try { final SeqNoStats seqNoStats = indexShard.seqNoStats(); + if (from > seqNoStats.getGlobalCheckpoint()) { + handler.accept(ShardChangesAction.getResponse(1L, seqNoStats, ShardChangesAction.EMPTY_OPERATIONS_ARRAY)); + return; + } Translog.Operation[] ops = ShardChangesAction.getOperations(indexShard, seqNoStats.getGlobalCheckpoint(), from, maxOperationCount, params.getRecordedLeaderIndexHistoryUUID(), params.getMaxBatchSizeInBytes()); // hard code mapping version; this is ok, as mapping updates are not tested here diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java index d5a0b0408c5..65136b41a29 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowIndexAction.java @@ -36,8 +36,8 @@ public final class FollowIndexAction extends Action { public static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; public static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE; static final TimeValue DEFAULT_MAX_RETRY_DELAY = new TimeValue(500); - static final TimeValue DEFAULT_IDLE_SHARD_RETRY_DELAY = TimeValue.timeValueSeconds(10); static final TimeValue MAX_RETRY_DELAY = TimeValue.timeValueMinutes(5); + public static final TimeValue DEFAULT_POLL_TIMEOUT = TimeValue.timeValueMinutes(1); private FollowIndexAction() { super(NAME); @@ -58,7 +58,7 @@ public final class FollowIndexAction extends Action { private static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); private static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); private static final ParseField MAX_RETRY_DELAY_FIELD = new ParseField("max_retry_delay"); - private static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + private static final ParseField POLL_TIMEOUT = new ParseField("poll_timeout"); private static final 
ConstructingObjectParser<Request, String> PARSER = new ConstructingObjectParser<>(NAME, true, (args, followerIndex) -> { if (args[1] != null) { @@ -83,8 +83,8 @@ public final class FollowIndexAction extends Action { ObjectParser.ValueType.STRING); PARSER.declareField( ConstructingObjectParser.optionalConstructorArg(), - (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), - IDLE_SHARD_RETRY_DELAY, + (p, c) -> TimeValue.parseTimeValue(p.text(), POLL_TIMEOUT.getPreferredName()), + POLL_TIMEOUT, ObjectParser.ValueType.STRING); } @@ -151,10 +151,10 @@ public final class FollowIndexAction extends Action { return maxRetryDelay; } - private TimeValue idleShardRetryDelay; + private TimeValue pollTimeout; - public TimeValue getIdleShardRetryDelay() { - return idleShardRetryDelay; + public TimeValue getPollTimeout() { + return pollTimeout; } public Request( @@ -166,7 +166,7 @@ public final class FollowIndexAction extends Action { final Integer maxConcurrentWriteBatches, final Integer maxWriteBufferSize, final TimeValue maxRetryDelay, - final TimeValue idleShardRetryDelay) { + final TimeValue pollTimeout) { if (leaderIndex == null) { throw new IllegalArgumentException(LEADER_INDEX_FIELD.getPreferredName() + " is missing"); @@ -206,7 +206,7 @@ public final class FollowIndexAction extends Action { } final TimeValue actualRetryTimeout = maxRetryDelay == null ? DEFAULT_MAX_RETRY_DELAY : maxRetryDelay; - final TimeValue actualIdleShardRetryDelay = idleShardRetryDelay == null ? DEFAULT_IDLE_SHARD_RETRY_DELAY : idleShardRetryDelay; + final TimeValue actualPollTimeout = pollTimeout == null ? DEFAULT_POLL_TIMEOUT : pollTimeout; this.leaderIndex = leaderIndex; this.followerIndex = followerIndex; @@ -216,7 +216,7 @@ public final class FollowIndexAction extends Action { this.maxConcurrentWriteBatches = actualMaxConcurrentWriteBatches; this.maxWriteBufferSize = actualMaxWriteBufferSize; this.maxRetryDelay = actualRetryTimeout; - this.idleShardRetryDelay = actualIdleShardRetryDelay; + this.pollTimeout = actualPollTimeout; } public Request() { @@ -252,7 +252,7 @@ public final class FollowIndexAction extends Action { maxConcurrentWriteBatches = in.readVInt(); maxWriteBufferSize = in.readVInt(); maxRetryDelay = in.readOptionalTimeValue(); - idleShardRetryDelay = in.readOptionalTimeValue(); + pollTimeout = in.readOptionalTimeValue(); } @Override @@ -266,7 +266,7 @@ public final class FollowIndexAction extends Action { out.writeVInt(maxConcurrentWriteBatches); out.writeVInt(maxWriteBufferSize); out.writeOptionalTimeValue(maxRetryDelay); - out.writeOptionalTimeValue(idleShardRetryDelay); + out.writeOptionalTimeValue(pollTimeout); } @Override @@ -281,7 +281,7 @@ public final class FollowIndexAction extends Action { builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); builder.field(MAX_RETRY_DELAY_FIELD.getPreferredName(), maxRetryDelay.getStringRep()); - builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + builder.field(POLL_TIMEOUT.getPreferredName(), pollTimeout.getStringRep()); } builder.endObject(); return builder; @@ -298,7 +298,7 @@ public final class FollowIndexAction extends Action { maxConcurrentWriteBatches == request.maxConcurrentWriteBatches && maxWriteBufferSize == request.maxWriteBufferSize && Objects.equals(maxRetryDelay, request.maxRetryDelay) && - Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay) && +
Objects.equals(pollTimeout, request.pollTimeout) && Objects.equals(leaderIndex, request.leaderIndex) && Objects.equals(followerIndex, request.followerIndex); } @@ -314,7 +314,7 @@ public final class FollowIndexAction extends Action { maxConcurrentWriteBatches, maxWriteBufferSize, maxRetryDelay, - idleShardRetryDelay + pollTimeout ); } } From e5d82c3dea8dd69889000ffa68ba67dbe3fdf03d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sun, 16 Sep 2018 11:11:51 -0700 Subject: [PATCH 07/27] Test: Fix dv date bwc tests when no docs have a value (#32798) This commit adds a guard around the rare case that no documents in the 10 iterations actually have any values, which would otherwise make the warning check incorrect. closes #32779 --- .../index/fielddata/ScriptDocValuesDatesTests.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java index f1ffe01bfc5..2c9ca591c43 100644 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java @@ -47,7 +47,6 @@ public class ScriptDocValuesDatesTests extends ESTestCase { assertDateDocValues(true); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32779") public void testJodaTimeBwc() throws IOException { assertDateDocValues(false, "The joda time api for doc values is deprecated." + " Use -Des.scripting.use_java_time=true to use the java time api for date field doc values"); @@ -71,6 +70,7 @@ public class ScriptDocValuesDatesTests extends ESTestCase { } } + Set<String> warnings = new HashSet<>(); Dates dates = wrap(values, deprecationMessage -> { warnings.add(deprecationMessage); @@ -86,12 +86,14 @@ public class ScriptDocValuesDatesTests extends ESTestCase { } ); + boolean valuesExist = false; for (int round = 0; round < 10; round++) { int d = between(0, values.length - 1); dates.setNextDocId(d); if (expectedDates[d].length > 0) { Object dateValue = AccessController.doPrivileged((PrivilegedAction<Object>) dates::getValue, noPermissionsAcc); assertEquals(expectedDates[d][0] , dateValue); + valuesExist = true; } else { Exception e = expectThrows(IllegalStateException.class, () -> dates.getValue()); assertEquals("A document doesn't have a value for a field! " + @@ -106,7 +108,9 @@ public class ScriptDocValuesDatesTests extends ESTestCase { } } - assertThat(warnings, containsInAnyOrder(expectedWarnings)); + if (valuesExist) { + assertThat(warnings, containsInAnyOrder(expectedWarnings)); + } } private Dates wrap(long[][] values, Consumer<String> deprecationHandler, boolean useJavaTime) { From 3046656ab11b0018729dc158d764b8f6246ec92d Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Sun, 16 Sep 2018 19:18:00 -0700 Subject: [PATCH 08/27] Scripting: Rework joda time backcompat (#33486) This commit switches the joda time backcompat in scripting to use augmentation over ZonedDateTime. The augmentation methods provide compatibility with the missing methods between joda's DateTime and java's ZonedDateTime. Because getDayOfWeek returns an enum in the java time API, ZonedDateTime is wrapped so that the method can return an int as the joda API does. The java time API version is renamed to getDayOfWeekEnum, which will be kept through 7.x for compatibility while users switch back to getDayOfWeek once joda compatibility is removed.
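For illustration, a minimal sketch of the wrapping approach described above (hypothetical, simplified class and names; the actual implementation is the JodaCompatibleZonedDateTime class added in this patch):

import java.time.DayOfWeek;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;

// Hypothetical, simplified wrapper; the real class also augments
// ZonedDateTime with the remaining joda-style getters.
public class WrappedDateTime {
    private final ZonedDateTime dt;

    public WrappedDateTime(Instant instant, ZoneId zone) {
        this.dt = ZonedDateTime.ofInstant(instant, zone);
    }

    // joda-compatible signature: the ISO day of week as an int (Monday == 1).
    public int getDayOfWeek() {
        return dt.getDayOfWeek().getValue();
    }

    // The java time variant, renamed to avoid clashing with the int-returning method.
    public DayOfWeek getDayOfWeekEnum() {
        return dt.getDayOfWeek();
    }
}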
--- .../elasticsearch/gradle/BuildPlugin.groovy | 3 - docs/build.gradle | 1 - .../painless-getting-started.asciidoc | 5 - .../bucket/datehistogram-aggregation.asciidoc | 3 +- modules/lang-painless/build.gradle | 1 - .../elasticsearch/painless/spi/Whitelist.java | 3 +- .../elasticsearch/painless/spi/joda.time.txt | 60 --- .../painless/spi/org.elasticsearch.txt | 88 +++- .../painless/FunctionRefTests.java | 16 +- .../test/painless/20_scriptfield.yml | 4 +- .../common/io/stream/StreamOutput.java | 10 + .../XContentElasticsearchExtension.java | 4 + .../index/fielddata/AtomicFieldData.java | 9 +- .../index/fielddata/ScriptDocValues.java | 88 +--- .../fielddata/plain/AtomicLongFieldData.java | 33 ++ .../script/JodaCompatibleZonedDateTime.java | 414 ++++++++++++++++++ .../support/values/ScriptDoubleValues.java | 3 + .../support/values/ScriptLongValues.java | 3 + .../subphase/DocValueFieldsFetchSubPhase.java | 2 +- .../fielddata/ScriptDocValuesDatesTests.java | 137 ------ .../JodaCompatibleZonedDateTimeTests.java | 240 ++++++++++ .../search/fields/SearchFieldsIT.java | 34 +- 22 files changed, 843 insertions(+), 318 deletions(-) delete mode 100644 modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt create mode 100644 server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java delete mode 100644 server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java create mode 100644 server/src/test/java/org/elasticsearch/script/JodaCompatibleZonedDateTimeTests.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 05e07049695..06c0827f1ff 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -825,9 +825,6 @@ class BuildPlugin implements Plugin { } } - // TODO: remove this once joda time is removed from scripting in 7.0 - systemProperty 'es.scripting.use_java_time', 'true' - // TODO: remove this once ctx isn't added to update script params in 7.0 systemProperty 'es.scripting.update.ctx_in_params', 'false' diff --git a/docs/build.gradle b/docs/build.gradle index f2a7f8511e3..aa075d05cd5 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -55,7 +55,6 @@ integTestCluster { setting 'reindex.remote.whitelist', '127.0.0.1:*' // TODO: remove this for 7.0, this exists to allow the doc examples in 6.x to continue using the defaults - systemProperty 'es.scripting.use_java_time', 'false' systemProperty 'es.scripting.update.ctx_in_params', 'false' //TODO: remove this once the cname is prepended to the address by default in 7.0 systemProperty 'es.http.cname_in_publish_address', 'true' diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index 8cff207ab04..b325fcf5549 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -220,11 +220,6 @@ GET hockey/_search } ---------------------------------------------------------------- // CONSOLE -// TEST[warning:The joda time api for doc values is deprecated. Use -Des.scripting.use_java_time=true to use the java time api for date field doc values] - -NOTE: Date fields are changing in 7.0 to be exposed as `ZonedDateTime` -from Java 8's time API. To switch to this functionality early, -add `-Des.scripting.use_java_time=true` to `jvm.options`. 
[float] [[modules-scripting-painless-regex]] diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index e19ecac462d..1d185e80f4f 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -416,7 +416,7 @@ POST /sales/_search?size=0 "terms": { "script": { "lang": "painless", - "source": "doc['date'].value.dayOfWeek" + "source": "doc['date'].value.dayOfWeekEnum.value" } } } @@ -425,7 +425,6 @@ POST /sales/_search?size=0 -------------------------------------------------- // CONSOLE // TEST[setup:sales] -// TEST[warning:The joda time api for doc values is deprecated. Use -Des.scripting.use_java_time=true to use the java time api for date field doc values] Response: diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 6bec6f50626..6f68c667fe6 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -24,7 +24,6 @@ esplugin { integTestCluster { module project.project(':modules:mapper-extras') - systemProperty 'es.scripting.use_java_time', 'true' systemProperty 'es.scripting.update.ctx_in_params', 'false' systemProperty 'es.http.cname_in_publish_address', 'true' } diff --git a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java index 31a9e595d0b..d6fe9c6870a 100644 --- a/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java +++ b/modules/lang-painless/spi/src/main/java/org/elasticsearch/painless/spi/Whitelist.java @@ -48,8 +48,7 @@ public final class Whitelist { "java.util.txt", "java.util.function.txt", "java.util.regex.txt", - "java.util.stream.txt", - "joda.time.txt" + "java.util.stream.txt" }; public static final List BASE_WHITELISTS = diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt deleted file mode 100644 index 3b2f379c38e..00000000000 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/joda.time.txt +++ /dev/null @@ -1,60 +0,0 @@ -# -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# -# Painless definition file. This defines the hierarchy of classes, -# what methods and fields they have, etc. -# - -# NOTE: this just minimal whitelisting of joda time, just to provide -# convenient access via the scripting API. 
classes are fully qualified to avoid -# any confusion with java.time - -class org.joda.time.ReadableInstant { - boolean equals(Object) - long getMillis() - int hashCode() - boolean isAfter(org.joda.time.ReadableInstant) - boolean isBefore(org.joda.time.ReadableInstant) - boolean isEqual(org.joda.time.ReadableInstant) - String toString() -} - -class org.joda.time.ReadableDateTime { - int getCenturyOfEra() - int getDayOfMonth() - int getDayOfWeek() - int getDayOfYear() - int getEra() - int getHourOfDay() - int getMillisOfDay() - int getMillisOfSecond() - int getMinuteOfDay() - int getMinuteOfHour() - int getMonthOfYear() - int getSecondOfDay() - int getSecondOfMinute() - int getWeekOfWeekyear() - int getWeekyear() - int getYear() - int getYearOfCentury() - int getYearOfEra() - String toString(String) - String toString(String,Locale) -} diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt index 81009de9979..7ac13c03876 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt @@ -76,9 +76,93 @@ class org.elasticsearch.index.fielddata.ScriptDocValues$Longs { List getValues() } +class org.elasticsearch.script.JodaCompatibleZonedDateTime { + ##### ZonedDateTime methods + int getDayOfMonth() + int getDayOfYear() + int getHour() + LocalDate toLocalDate() + LocalDateTime toLocalDateTime() + int getMinute() + Month getMonth() + int getMonthValue() + int getNano() + int getSecond() + int getYear() + ZonedDateTime minus(TemporalAmount) + ZonedDateTime minus(long,TemporalUnit) + ZonedDateTime minusYears(long) + ZonedDateTime minusMonths(long) + ZonedDateTime minusWeeks(long) + ZonedDateTime minusDays(long) + ZonedDateTime minusHours(long) + ZonedDateTime minusMinutes(long) + ZonedDateTime minusSeconds(long) + ZonedDateTime minusNanos(long) + ZonedDateTime plus(TemporalAmount) + ZonedDateTime plus(long,TemporalUnit) + ZonedDateTime plusDays(long) + ZonedDateTime plusHours(long) + ZonedDateTime plusMinutes(long) + ZonedDateTime plusMonths(long) + ZonedDateTime plusNanos(long) + ZonedDateTime plusSeconds(long) + ZonedDateTime plusWeeks(long) + ZonedDateTime plusYears(long) + Instant toInstant() + OffsetDateTime toOffsetDateTime() + ZonedDateTime truncatedTo(TemporalUnit) + ZonedDateTime with(TemporalAdjuster) + ZonedDateTime with(TemporalField,long) + ZonedDateTime withDayOfMonth(int) + ZonedDateTime withDayOfYear(int) + ZonedDateTime withEarlierOffsetAtOverlap() + ZonedDateTime withFixedOffsetZone() + ZonedDateTime withHour(int) + ZonedDateTime withLaterOffsetAtOverlap() + ZonedDateTime withMinute(int) + ZonedDateTime withMonth(int) + ZonedDateTime withNano(int) + ZonedDateTime withSecond(int) + ZonedDateTime withYear(int) + ZonedDateTime withZoneSameLocal(ZoneId) + ZonedDateTime withZoneSameInstant(ZoneId) + + #### Joda methods that exist in java time + boolean equals(Object) + int hashCode() + boolean isAfter(ZonedDateTime) + boolean isBefore(ZonedDateTime) + boolean isEqual(ZonedDateTime) + String toString() + + #### Joda time methods + long getMillis() + int getCenturyOfEra() + int getEra() + int getHourOfDay() + int getMillisOfDay() + int getMillisOfSecond() + int getMinuteOfDay() + int getMinuteOfHour() + int getMonthOfYear() + int getSecondOfDay() + int getSecondOfMinute() + int getWeekOfWeekyear() + int 
getWeekyear() + int getYearOfCentury() + int getYearOfEra() + String toString(String) + String toString(String,Locale) + + # conflicting methods + DayOfWeek getDayOfWeekEnum() + int getDayOfWeek() +} + class org.elasticsearch.index.fielddata.ScriptDocValues$Dates { - Object get(int) - Object getValue() + JodaCompatibleZonedDateTime get(int) + JodaCompatibleZonedDateTime getValue() List getValues() } diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java index 5829593f524..96360a62868 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/FunctionRefTests.java @@ -19,10 +19,8 @@ package org.elasticsearch.painless; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; - import java.lang.invoke.LambdaConversionException; +import java.time.Instant; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; @@ -59,15 +57,15 @@ public class FunctionRefTests extends ScriptTestCase { public void testQualifiedVirtualMethodReference() { long instant = randomLong(); assertEquals(instant, exec( - "List l = [params.d]; return l.stream().mapToLong(org.joda.time.ReadableDateTime::getMillis).sum()", - singletonMap("d", new DateTime(instant, DateTimeZone.UTC)), true)); + "List l = [params.d]; return l.stream().mapToLong(Instant::toEpochMilli).sum()", + singletonMap("d", Instant.ofEpochMilli(instant)), true)); } public void testQualifiedVirtualMethodReferenceDef() { long instant = randomLong(); assertEquals(instant, exec( - "def l = [params.d]; return l.stream().mapToLong(org.joda.time.ReadableDateTime::getMillis).sum()", - singletonMap("d", new DateTime(instant, DateTimeZone.UTC)), true)); + "def l = [params.d]; return l.stream().mapToLong(Instant::toEpochMilli).sum()", + singletonMap("d", Instant.ofEpochMilli(instant)), true)); } public void testCtorMethodReference() { @@ -197,10 +195,10 @@ public class FunctionRefTests extends ScriptTestCase { public void testQualifiedMethodMissing() { Exception e = expectScriptThrows(IllegalArgumentException.class, () -> { - exec("List l = [2, 1]; l.sort(org.joda.time.ReadableDateTime::bogus); return l.get(0);", false); + exec("List l = [2, 1]; l.sort(java.time.Instant::bogus); return l.get(0);", false); }); assertThat(e.getMessage(), - containsString("function reference [org.joda.time.ReadableDateTime::bogus/2] matching [java.util.Comparator")); + containsString("function reference [java.time.Instant::bogus/2] matching [java.util.Comparator, compare/2")); } public void testClassMissing() { diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml index 3be6601521e..6bbda238ab2 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/20_scriptfield.yml @@ -108,7 +108,7 @@ setup: script_fields: bar: script: - source: "doc.date.value.dayOfWeek.value" + source: "doc.date.value.dayOfWeekEnum.value" - match: { hits.hits.0.fields.bar.0: 7} @@ -123,7 +123,7 @@ setup: source: > StringBuilder b = new StringBuilder(); for (def date : doc.dates) { - b.append(" ").append(date.getDayOfWeek().value); + b.append(" 
").append(date.getDayOfWeekEnum().value); } return b.toString().trim() diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 199b8872c55..b00706b78ae 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.io.stream.Writeable.Writer; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.script.JodaCompatibleZonedDateTime; import org.joda.time.DateTimeZone; import org.joda.time.ReadableInstant; @@ -680,6 +681,15 @@ public abstract class StreamOutput extends OutputStream { o.writeString(zonedDateTime.getZone().getId()); o.writeLong(zonedDateTime.toInstant().toEpochMilli()); }); + writers.put(JodaCompatibleZonedDateTime.class, (o, v) -> { + // write the joda compatibility datetime as joda datetime + o.writeByte((byte) 13); + final JodaCompatibleZonedDateTime zonedDateTime = (JodaCompatibleZonedDateTime) v; + String zoneId = zonedDateTime.getZonedDateTime().getZone().getId(); + // joda does not understand "Z" for utc, so we must special case + o.writeString(zoneId.equals("Z") ? DateTimeZone.UTC.getID() : zoneId); + o.writeLong(zonedDateTime.toInstant().toEpochMilli()); + }); WRITERS = Collections.unmodifiableMap(writers); } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java index 684e96f678c..f32ba715a80 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.script.JodaCompatibleZonedDateTime; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.Instant; @@ -93,6 +94,7 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension writers.put(Year.class, (b, v) -> b.value(v.toString())); writers.put(Duration.class, (b, v) -> b.value(v.toString())); writers.put(Period.class, (b, v) -> b.value(v.toString())); + writers.put(JodaCompatibleZonedDateTime.class, XContentBuilder::timeValue); writers.put(BytesReference.class, (b, v) -> { if (v == null) { @@ -141,6 +143,8 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension d -> DEFAULT_FORMATTER.format(ZonedDateTime.ofInstant((java.time.Instant) d, ZoneOffset.UTC))); transformers.put(LocalDate.class, d -> ((LocalDate) d).toString()); transformers.put(LocalTime.class, d -> LOCAL_TIME_FORMATTER.format((LocalTime) d)); + transformers.put(JodaCompatibleZonedDateTime.class, + d -> DEFAULT_FORMATTER.format(((JodaCompatibleZonedDateTime) d).getZonedDateTime())); return transformers; } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java index 8abb74ea6d5..c4f310073c4 100644 --- 
a/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/AtomicFieldData.java @@ -28,10 +28,17 @@ import org.elasticsearch.common.lease.Releasable; public interface AtomicFieldData extends Accountable, Releasable { /** - * Returns a "scripting" based values. + * Returns field values for use in scripting. */ ScriptDocValues getScriptValues(); + /** + * Returns field values for use by returned hits. + */ + default ScriptDocValues getLegacyFieldValues() { + return getScriptValues(); + } + /** * Return a String representation of the values. */ diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 35284cb655d..0c3e634e352 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -26,26 +26,17 @@ import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.joda.time.DateTimeZone; -import org.joda.time.MutableDateTime; +import org.elasticsearch.script.JodaCompatibleZonedDateTime; import java.io.IOException; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.time.Instant; import java.time.ZoneOffset; -import java.time.ZonedDateTime; import java.util.AbstractList; import java.util.Arrays; import java.util.Comparator; import java.util.List; -import java.util.function.Consumer; import java.util.function.UnaryOperator; -import static org.elasticsearch.common.Booleans.parseBoolean; - /** * Script level doc values, the assumption is that any implementation will * implement a getValue and a getValues that return @@ -147,55 +138,28 @@ public abstract class ScriptDocValues extends AbstractList { } } - public static final class Dates extends ScriptDocValues { - - /** Whether scripts should expose dates as java time objects instead of joda time. */ - private static final boolean USE_JAVA_TIME = parseBoolean(System.getProperty("es.scripting.use_java_time"), false); - - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(Dates.class)); + public static final class Dates extends ScriptDocValues { private final SortedNumericDocValues in; /** - * Method call to add deprecation message. Normally this is - * {@link #deprecationLogger} but tests override. + * Values wrapped in {@link java.time.ZonedDateTime} objects. */ - private final Consumer deprecationCallback; - - /** - * Whether java time or joda time should be used. This is normally {@link #USE_JAVA_TIME} but tests override it. - */ - private final boolean useJavaTime; - - /** - * Values wrapped in a date time object. The concrete type depends on the system property {@code es.scripting.use_java_time}. - * When that system property is {@code false}, the date time objects are of type {@link MutableDateTime}. When the system - * property is {@code true}, the date time objects are of type {@link java.time.ZonedDateTime}. - */ - private Object[] dates; + private JodaCompatibleZonedDateTime[] dates; private int count; /** * Standard constructor. 
*/ public Dates(SortedNumericDocValues in) { - this(in, message -> deprecationLogger.deprecatedAndMaybeLog("scripting_joda_time_deprecation", message), USE_JAVA_TIME); - } - - /** - * Constructor for testing with a deprecation callback. - */ - Dates(SortedNumericDocValues in, Consumer<String> deprecationCallback, boolean useJavaTime) { this.in = in; - this.deprecationCallback = deprecationCallback; - this.useJavaTime = useJavaTime; } /** * Fetch the first field value or 0 millis after epoch if there are no * values. */ - public Object getValue() { + public JodaCompatibleZonedDateTime getValue() { if (count == 0) { throw new IllegalStateException("A document doesn't have a value for a field! " + "Use doc[].size()==0 to check if a document is missing a field!"); @@ -204,7 +168,7 @@ } @Override - public Object get(int index) { + public JodaCompatibleZonedDateTime get(int index) { if (index >= count) { throw new IndexOutOfBoundsException( "attempted to fetch the [" + index + "] date when there are only [" @@ -235,41 +199,13 @@ if (count == 0) { return; } - if (useJavaTime) { - if (dates == null || count > dates.length) { - // Happens for the document. We delay allocating dates so we can allocate it with a reasonable size. - dates = new ZonedDateTime[count]; - } - for (int i = 0; i < count; ++i) { - dates[i] = ZonedDateTime.ofInstant(Instant.ofEpochMilli(in.nextValue()), ZoneOffset.UTC); - } - } else { - deprecated("The joda time api for doc values is deprecated. Use -Des.scripting.use_java_time=true" + - " to use the java time api for date field doc values"); - if (dates == null || count > dates.length) { - // Happens for the document. We delay allocating dates so we can allocate it with a reasonable size. - dates = new MutableDateTime[count]; - } - for (int i = 0; i < count; i++) { - dates[i] = new MutableDateTime(in.nextValue(), DateTimeZone.UTC); - } + if (dates == null || count > dates.length) { + // Happens for the document. We delay allocating dates so we can allocate it with a reasonable size. + dates = new JodaCompatibleZonedDateTime[count]; + } + for (int i = 0; i < count; ++i) { + dates[i] = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(in.nextValue()), ZoneOffset.UTC); } - } - - /** - * Log a deprecation log, with the server's permissions, not the permissions of the - * script calling this method. We need to do this to prevent errors when rolling - * the log file.
- */ - private void deprecated(String message) { - // Intentionally not calling SpecialPermission.check because this is supposed to be called by scripts - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - deprecationCallback.accept(message); - return null; - } - }); } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java index 9e0f3ab0736..66b25c21c80 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/plain/AtomicLongFieldData.java @@ -25,6 +25,11 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData.NumericType; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; +import org.elasticsearch.script.JodaCompatibleZonedDateTime; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; /** * Specialization of {@link AtomicNumericFieldData} for integers. @@ -47,6 +52,34 @@ abstract class AtomicLongFieldData implements AtomicNumericFieldData { return ramBytesUsed; } + @Override + public final ScriptDocValues getLegacyFieldValues() { + switch (numericType) { + case DATE: + final ScriptDocValues.Dates realDV = new ScriptDocValues.Dates(getLongValues()); + return new ScriptDocValues() { + + @Override + public int size() { + return realDV.size(); + } + + @Override + public DateTime get(int index) { + JodaCompatibleZonedDateTime dt = realDV.get(index); + return new DateTime(dt.toInstant().toEpochMilli(), DateTimeZone.UTC); + } + + @Override + public void setNextDocId(int docId) throws IOException { + realDV.setNextDocId(docId); + } + }; + default: + return getScriptValues(); + } + } + @Override public final ScriptDocValues getScriptValues() { switch (numericType) { diff --git a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java new file mode 100644 index 00000000000..3abb0f6a304 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java @@ -0,0 +1,414 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.script; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.time.DayOfWeek; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.Month; +import java.time.OffsetDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; +import java.time.temporal.TemporalAdjuster; +import java.time.temporal.TemporalAmount; +import java.time.temporal.TemporalField; +import java.time.temporal.TemporalUnit; +import java.time.temporal.WeekFields; +import java.util.Locale; + +/** + * A wrapper around ZonedDateTime that exposes joda methods for backcompat. + */ +public class JodaCompatibleZonedDateTime { + private static final DeprecationLogger DEPRECATION_LOGGER = + new DeprecationLogger(ESLoggerFactory.getLogger(JodaCompatibleZonedDateTime.class)); + + private static void logDeprecated(String key, String message, Object... params) { + // NOTE: we don't check SpecialPermission because this will be called (indirectly) from scripts + AccessController.doPrivileged((PrivilegedAction) () -> { + DEPRECATION_LOGGER.deprecatedAndMaybeLog(key, message, params); + return null; + }); + } + + private static void logDeprecatedMethod(String oldMethod, String newMethod) { + logDeprecated(oldMethod, "Use of the joda time method [{}] is deprecated. Use [{}] instead.", oldMethod, newMethod); + } + + private ZonedDateTime dt; + + public JodaCompatibleZonedDateTime(Instant instant, ZoneId zone) { + this.dt = ZonedDateTime.ofInstant(instant, zone); + } + + // access the underlying ZonedDateTime + public ZonedDateTime getZonedDateTime() { + return dt; + } + + @Override + public boolean equals(Object o) { + return dt.equals(o); + } + + @Override + public int hashCode() { + return dt.hashCode(); + } + + @Override + public String toString() { + return dt.toString(); + } + + public boolean isAfter(ZonedDateTime o) { + return dt.isAfter(o); + } + + public boolean isBefore(ZonedDateTime o) { + return dt.isBefore(o); + } + + public boolean isEqual(ZonedDateTime o) { + return dt.isEqual(o); + } + + public int getDayOfMonth() { + return dt.getDayOfMonth(); + } + + public int getDayOfYear() { + return dt.getDayOfYear(); + } + + public int getHour() { + return dt.getHour(); + } + + public LocalDate toLocalDate() { + return dt.toLocalDate(); + } + + public LocalDateTime toLocalDateTime() { + return dt.toLocalDateTime(); + } + + public int getMinute() { + return dt.getMinute(); + } + + public Month getMonth() { + return dt.getMonth(); + } + + public int getMonthValue() { + return dt.getMonthValue(); + } + + public int getNano() { + return dt.getNano(); + } + + public int getSecond() { + return dt.getSecond(); + } + + public int getYear() { + return dt.getYear(); + } + + public ZonedDateTime minus(TemporalAmount delta) { + return dt.minus(delta); + } + + public ZonedDateTime minus(long amount, TemporalUnit unit) { + return dt.minus(amount, unit); + } + + public ZonedDateTime minusYears(long amount) { + return dt.minusYears(amount); + } + + public ZonedDateTime minusMonths(long amount) { + return dt.minusMonths(amount); + } + + public ZonedDateTime minusWeeks(long amount) { + return dt.minusWeeks(amount); + } + + public ZonedDateTime 
minusDays(long amount) { + return dt.minusDays(amount); + } + + public ZonedDateTime minusHours(long amount) { + return dt.minusHours(amount); + } + + public ZonedDateTime minusMinutes(long amount) { + return dt.minusMinutes(amount); + } + + public ZonedDateTime minusSeconds(long amount) { + return dt.minusSeconds(amount); + } + + public ZonedDateTime minusNanos(long amount) { + return dt.minusNanos(amount); + } + + public ZonedDateTime plus(TemporalAmount amount) { + return dt.plus(amount); + } + + public ZonedDateTime plus(long amount,TemporalUnit unit) { + return dt.plus(amount, unit); + } + + public ZonedDateTime plusDays(long amount) { + return dt.plusDays(amount); + } + + public ZonedDateTime plusHours(long amount) { + return dt.plusHours(amount); + } + + public ZonedDateTime plusMinutes(long amount) { + return dt.plusMinutes(amount); + } + + public ZonedDateTime plusMonths(long amount) { + return dt.plusMonths(amount); + } + + public ZonedDateTime plusNanos(long amount) { + return dt.plusNanos(amount); + } + + public ZonedDateTime plusSeconds(long amount) { + return dt.plusSeconds(amount); + } + + public ZonedDateTime plusWeeks(long amount) { + return dt.plusWeeks(amount); + } + + public ZonedDateTime plusYears(long amount) { + return dt.plusYears(amount); + } + + public Instant toInstant() { + return dt.toInstant(); + } + + public OffsetDateTime toOffsetDateTime() { + return dt.toOffsetDateTime(); + } + + @SuppressForbidden(reason = "only exposing the method as a passthrough") + public ZonedDateTime truncatedTo(TemporalUnit unit) { + return dt.truncatedTo(unit); + } + + public ZonedDateTime with(TemporalAdjuster adjuster) { + return dt.with(adjuster); + } + + public ZonedDateTime with(TemporalField field, long newValue) { + return dt.with(field, newValue); + } + + public ZonedDateTime withDayOfMonth(int value) { + return dt.withDayOfMonth(value); + } + + public ZonedDateTime withDayOfYear(int value) { + return dt.withDayOfYear(value); + } + + public ZonedDateTime withEarlierOffsetAtOverlap() { + return dt.withEarlierOffsetAtOverlap(); + } + + public ZonedDateTime withFixedOffsetZone() { + return dt.withFixedOffsetZone(); + } + + public ZonedDateTime withHour(int value) { + return dt.withHour(value); + } + + public ZonedDateTime withLaterOffsetAtOverlap() { + return dt.withLaterOffsetAtOverlap(); + } + + public ZonedDateTime withMinute(int value) { + return dt.withMinute(value); + } + + public ZonedDateTime withMonth(int value) { + return dt.withMonth(value); + } + + public ZonedDateTime withNano(int value) { + return dt.withNano(value); + } + + public ZonedDateTime withSecond(int value) { + return dt.withSecond(value); + } + + public ZonedDateTime withYear(int value) { + return dt.withYear(value); + } + + public ZonedDateTime withZoneSameLocal(ZoneId zone) { + return dt.withZoneSameLocal(zone); + } + + public ZonedDateTime withZoneSameInstant(ZoneId zone) { + return dt.withZoneSameInstant(zone); + } + + @Deprecated + public long getMillis() { + logDeprecatedMethod("getMillis()", "toInstant().toEpochMilli()"); + return dt.toInstant().toEpochMilli(); + } + + @Deprecated + public int getCenturyOfEra() { + logDeprecatedMethod("getCenturyOfEra()", "get(ChronoField.YEAR_OF_ERA) / 100"); + return dt.get(ChronoField.YEAR_OF_ERA) / 100; + } + + @Deprecated + public int getEra() { + logDeprecatedMethod("getEra()", "get(ChronoField.ERA)"); + return dt.get(ChronoField.ERA); + } + + @Deprecated + public int getHourOfDay() { + logDeprecatedMethod("getHourOfDay()", "getHour()"); + return 
dt.getHour(); + } + + @Deprecated + public int getMillisOfDay() { + logDeprecatedMethod("getMillisOfDay()", "get(ChronoField.MILLI_OF_DAY)"); + return dt.get(ChronoField.MILLI_OF_DAY); + } + + @Deprecated + public int getMillisOfSecond() { + logDeprecatedMethod("getMillisOfSecond()", "get(ChronoField.MILLI_OF_SECOND)"); + return dt.get(ChronoField.MILLI_OF_SECOND); + } + + @Deprecated + public int getMinuteOfDay() { + logDeprecatedMethod("getMinuteOfDay()", "get(ChronoField.MINUTE_OF_DAY)"); + return dt.get(ChronoField.MINUTE_OF_DAY); + } + + @Deprecated + public int getMinuteOfHour() { + logDeprecatedMethod("getMinuteOfHour()", "getMinute()"); + return dt.getMinute(); + } + + @Deprecated + public int getMonthOfYear() { + logDeprecatedMethod("getMonthOfYear()", "getMonthValue()"); + return dt.getMonthValue(); + } + + @Deprecated + public int getSecondOfDay() { + logDeprecatedMethod("getSecondOfDay()", "get(ChronoField.SECOND_OF_DAY)"); + return dt.get(ChronoField.SECOND_OF_DAY); + } + + @Deprecated + public int getSecondOfMinute() { + logDeprecatedMethod("getSecondOfMinute()", "getSecond()"); + return dt.getSecond(); + } + + @Deprecated + public int getWeekOfWeekyear() { + logDeprecatedMethod("getWeekOfWeekyear()", "get(WeekFields.ISO.weekOfWeekBasedYear())"); + return dt.get(WeekFields.ISO.weekOfWeekBasedYear()); + } + + @Deprecated + public int getWeekyear() { + logDeprecatedMethod("getWeekyear()", "get(WeekFields.ISO.weekBasedYear())"); + return dt.get(WeekFields.ISO.weekBasedYear()); + } + + @Deprecated + public int getYearOfCentury() { + logDeprecatedMethod("getYearOfCentury()", "get(ChronoField.YEAR_OF_ERA) % 100"); + return dt.get(ChronoField.YEAR_OF_ERA) % 100; + } + + @Deprecated + public int getYearOfEra() { + logDeprecatedMethod("getYearOfEra()", "get(ChronoField.YEAR_OF_ERA)"); + return dt.get(ChronoField.YEAR_OF_ERA); + } + + @Deprecated + public String toString(String format) { + logDeprecatedMethod("toString(String)", "a DateTimeFormatter"); + // TODO: replace with bwc formatter + return new DateTime(dt.toInstant().toEpochMilli(), DateTimeZone.forID(dt.getZone().getId())).toString(format); + } + + @Deprecated + public String toString(String format, Locale locale) { + logDeprecatedMethod("toString(String,Locale)", "a DateTimeFormatter"); + // TODO: replace with bwc formatter + return new DateTime(dt.toInstant().toEpochMilli(), DateTimeZone.forID(dt.getZone().getId())).toString(format, locale); + } + + public DayOfWeek getDayOfWeekEnum() { + return dt.getDayOfWeek(); + } + + @Deprecated + public int getDayOfWeek() { + logDeprecated("getDayOfWeek()", + "The return type of [getDayOfWeek()] will change to an enum in 7.0. 
Use getDayOfWeekEnum().getValue()."); + return dt.getDayOfWeek().getValue(); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java index 4bb531c0d40..53b525e5d0d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptDoubleValues.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.support.values; import org.apache.lucene.search.Scorable; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.index.fielddata.SortingNumericDoubleValues; +import org.elasticsearch.script.JodaCompatibleZonedDateTime; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.joda.time.ReadableInstant; @@ -95,6 +96,8 @@ public class ScriptDoubleValues extends SortingNumericDoubleValues implements Sc return ((ReadableInstant) o).getMillis(); } else if (o instanceof ZonedDateTime) { return ((ZonedDateTime) o).toInstant().toEpochMilli(); + } else if (o instanceof JodaCompatibleZonedDateTime) { + return ((JodaCompatibleZonedDateTime) o).toInstant().toEpochMilli(); } else if (o instanceof Boolean) { // We do expose boolean fields as boolean in scripts, however aggregations still expect // that scripts return the same internal representation as regular fields, so boolean diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java index c57afa1960d..bf08c47f827 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/values/ScriptLongValues.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.Scorable; import org.apache.lucene.util.LongValues; import org.elasticsearch.common.lucene.ScorerAware; import org.elasticsearch.index.fielddata.AbstractSortingNumericDocValues; +import org.elasticsearch.script.JodaCompatibleZonedDateTime; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.joda.time.ReadableInstant; @@ -94,6 +95,8 @@ public class ScriptLongValues extends AbstractSortingNumericDocValues implements return ((ReadableInstant) o).getMillis(); } else if (o instanceof ZonedDateTime) { return ((ZonedDateTime) o).toInstant().toEpochMilli(); + } else if (o instanceof JodaCompatibleZonedDateTime) { + return ((JodaCompatibleZonedDateTime) o).toInstant().toEpochMilli(); } else if (o instanceof Boolean) { // We do expose boolean fields as boolean in scripts, however aggregations still expect // that scripts return the same internal representation as regular fields, so boolean diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java index 97e5b70f9da..398bc847b33 100644 --- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/DocValueFieldsFetchSubPhase.java @@ -115,7 +115,7 @@ public final class 
DocValueFieldsFetchSubPhase implements FetchSubPhase { subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex); data = indexFieldData.load(subReaderContext); if (format == null) { - scriptValues = data.getScriptValues(); + scriptValues = data.getLegacyFieldValues(); } else if (indexFieldData instanceof IndexNumericFieldData) { if (((IndexNumericFieldData) indexFieldData).getNumericType().isFloatingPoint()) { doubleValues = ((AtomicNumericFieldData) data).getDoubleValues(); diff --git a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java b/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java deleted file mode 100644 index 2c9ca591c43..00000000000 --- a/server/src/test/java/org/elasticsearch/index/fielddata/ScriptDocValuesDatesTests.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.fielddata; - -import org.elasticsearch.index.fielddata.ScriptDocValues.Dates; -import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; - -import java.io.IOException; -import java.security.AccessControlContext; -import java.security.AccessController; -import java.security.PermissionCollection; -import java.security.Permissions; -import java.security.PrivilegedAction; -import java.security.ProtectionDomain; -import java.time.Instant; -import java.time.ZoneOffset; -import java.time.ZonedDateTime; -import java.util.HashSet; -import java.util.Set; -import java.util.function.Consumer; -import java.util.function.Function; - -import static org.hamcrest.Matchers.containsInAnyOrder; - -public class ScriptDocValuesDatesTests extends ESTestCase { - - public void testJavaTime() throws IOException { - assertDateDocValues(true); - } - - public void testJodaTimeBwc() throws IOException { - assertDateDocValues(false, "The joda time api for doc values is deprecated." + - " Use -Des.scripting.use_java_time=true to use the java time api for date field doc values"); - } - - public void assertDateDocValues(boolean useJavaTime, String... expectedWarnings) throws IOException { - final Function datetimeCtor; - if (useJavaTime) { - datetimeCtor = millis -> ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneOffset.UTC); - } else { - datetimeCtor = millis -> new DateTime(millis, DateTimeZone.UTC); - } - long[][] values = new long[between(3, 10)][]; - Object[][] expectedDates = new Object[values.length][]; - for (int d = 0; d < values.length; d++) { - values[d] = new long[randomBoolean() ? randomBoolean() ? 
0 : 1 : between(2, 100)]; - expectedDates[d] = new Object[values[d].length]; - for (int i = 0; i < values[d].length; i++) { - values[d][i] = randomNonNegativeLong(); - expectedDates[d][i] = datetimeCtor.apply(values[d][i]); - } - } - - - Set warnings = new HashSet<>(); - Dates dates = wrap(values, deprecationMessage -> { - warnings.add(deprecationMessage); - /* Create a temporary directory to prove we are running with the - * server's permissions. */ - createTempDir(); - }, useJavaTime); - // each call to get or getValue will be run with limited permissions, just as they are in scripts - PermissionCollection noPermissions = new Permissions(); - AccessControlContext noPermissionsAcc = new AccessControlContext( - new ProtectionDomain[] { - new ProtectionDomain(null, noPermissions) - } - ); - - boolean valuesExist = false; - for (int round = 0; round < 10; round++) { - int d = between(0, values.length - 1); - dates.setNextDocId(d); - if (expectedDates[d].length > 0) { - Object dateValue = AccessController.doPrivileged((PrivilegedAction) dates::getValue, noPermissionsAcc); - assertEquals(expectedDates[d][0] , dateValue); - valuesExist = true; - } else { - Exception e = expectThrows(IllegalStateException.class, () -> dates.getValue()); - assertEquals("A document doesn't have a value for a field! " + - "Use doc[].size()==0 to check if a document is missing a field!", e.getMessage()); - } - - assertEquals(values[d].length, dates.size()); - for (int i = 0; i < values[d].length; i++) { - final int ndx = i; - Object dateValue = AccessController.doPrivileged((PrivilegedAction) () -> dates.get(ndx), noPermissionsAcc); - assertEquals(expectedDates[d][i], dateValue); - } - } - - if (valuesExist) { - assertThat(warnings, containsInAnyOrder(expectedWarnings)); - } - } - - private Dates wrap(long[][] values, Consumer deprecationHandler, boolean useJavaTime) { - return new Dates(new AbstractSortedNumericDocValues() { - long[] current; - int i; - - @Override - public boolean advanceExact(int doc) { - current = values[doc]; - i = 0; - return current.length > 0; - } - @Override - public int docValueCount() { - return current.length; - } - @Override - public long nextValue() { - return current[i++]; - } - }, deprecationHandler, useJavaTime); - } -} diff --git a/server/src/test/java/org/elasticsearch/script/JodaCompatibleZonedDateTimeTests.java b/server/src/test/java/org/elasticsearch/script/JodaCompatibleZonedDateTimeTests.java new file mode 100644 index 00000000000..8b494826863 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/script/JodaCompatibleZonedDateTimeTests.java @@ -0,0 +1,240 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.script; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.Appender; +import org.apache.logging.log4j.core.LogEvent; +import org.apache.logging.log4j.core.appender.AbstractAppender; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.test.ESTestCase; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.junit.Before; + +import java.security.AccessControlContext; +import java.security.AccessController; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.PrivilegedAction; +import java.security.ProtectionDomain; +import java.time.DayOfWeek; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.Month; +import java.time.ZoneOffset; +import java.util.Locale; + +import static org.hamcrest.Matchers.equalTo; + +public class JodaCompatibleZonedDateTimeTests extends ESTestCase { + private static final Logger DEPRECATION_LOGGER = + LogManager.getLogger("org.elasticsearch.deprecation.script.JodaCompatibleZonedDateTime"); + + // each call to get or getValue will be run with limited permissions, just as they are in scripts + private static PermissionCollection NO_PERMISSIONS = new Permissions(); + private static AccessControlContext NO_PERMISSIONS_ACC = new AccessControlContext( + new ProtectionDomain[] { + new ProtectionDomain(null, NO_PERMISSIONS) + } + ); + + private JodaCompatibleZonedDateTime javaTime; + private DateTime jodaTime; + + @Before + public void setupTime() { + long millis = randomIntBetween(0, Integer.MAX_VALUE); + javaTime = new JodaCompatibleZonedDateTime(Instant.ofEpochMilli(millis), ZoneOffset.ofHours(-7)); + jodaTime = new DateTime(millis, DateTimeZone.forOffsetHours(-7)); + } + + void assertDeprecation(Runnable assertions, String message) { + Appender appender = new AbstractAppender("test", null, null) { + @Override + public void append(LogEvent event) { + /* Create a temporary directory to prove we are running with the + * server's permissions. */ + createTempDir(); + } + }; + appender.start(); + Loggers.addAppender(DEPRECATION_LOGGER, appender); + try { + // the assertions are run with the same reduced privileges scripts run with + AccessController.doPrivileged((PrivilegedAction) () -> { + assertions.run(); + return null; + }, NO_PERMISSIONS_ACC); + } finally { + appender.stop(); + Loggers.removeAppender(DEPRECATION_LOGGER, appender); + } + + assertWarnings(message); + } + + void assertMethodDeprecation(Runnable assertions, String oldMethod, String newMethod) { + assertDeprecation(assertions, "Use of the joda time method [" + oldMethod + "] is deprecated. 
Use [" + newMethod + "] instead."); + } + + public void testDayOfMonth() { + assertThat(javaTime.getDayOfMonth(), equalTo(jodaTime.getDayOfMonth())); + } + + public void testDayOfYear() { + assertThat(javaTime.getDayOfYear(), equalTo(jodaTime.getDayOfYear())); + } + + public void testHour() { + assertThat(javaTime.getHour(), equalTo(jodaTime.getHourOfDay())); + } + + public void testLocalDate() { + assertThat(javaTime.toLocalDate(), equalTo(LocalDate.of(jodaTime.getYear(), jodaTime.getMonthOfYear(), jodaTime.getDayOfMonth()))); + } + + public void testLocalDateTime() { + LocalDateTime dt = LocalDateTime.of(jodaTime.getYear(), jodaTime.getMonthOfYear(), jodaTime.getDayOfMonth(), + jodaTime.getHourOfDay(), jodaTime.getMinuteOfHour(), jodaTime.getSecondOfMinute(), + jodaTime.getMillisOfSecond() * 1000000); + assertThat(javaTime.toLocalDateTime(), equalTo(dt)); + } + + public void testMinute() { + assertThat(javaTime.getMinute(), equalTo(jodaTime.getMinuteOfHour())); + } + + public void testMonth() { + assertThat(javaTime.getMonth(), equalTo(Month.of(jodaTime.getMonthOfYear()))); + } + + public void testMonthValue() { + assertThat(javaTime.getMonthValue(), equalTo(jodaTime.getMonthOfYear())); + } + + public void testNano() { + assertThat(javaTime.getNano(), equalTo(jodaTime.getMillisOfSecond() * 1000000)); + } + + public void testSecond() { + assertThat(javaTime.getSecond(), equalTo(jodaTime.getSecondOfMinute())); + } + + public void testYear() { + assertThat(javaTime.getYear(), equalTo(jodaTime.getYear())); + } + + public void testMillis() { + assertMethodDeprecation(() -> assertThat(javaTime.getMillis(), equalTo(jodaTime.getMillis())), + "getMillis()", "toInstant().toEpochMilli()"); + } + + public void testCenturyOfEra() { + assertMethodDeprecation(() -> assertThat(javaTime.getCenturyOfEra(), equalTo(jodaTime.getCenturyOfEra())), + "getCenturyOfEra()", "get(ChronoField.YEAR_OF_ERA) / 100"); + } + + public void testEra() { + assertMethodDeprecation(() -> assertThat(javaTime.getEra(), equalTo(jodaTime.getEra())), + "getEra()", "get(ChronoField.ERA)"); + } + + public void testHourOfDay() { + assertMethodDeprecation(() -> assertThat(javaTime.getHourOfDay(), equalTo(jodaTime.getHourOfDay())), + "getHourOfDay()", "getHour()"); + } + + public void testMillisOfDay() { + assertMethodDeprecation(() -> assertThat(javaTime.getMillisOfDay(), equalTo(jodaTime.getMillisOfDay())), + "getMillisOfDay()", "get(ChronoField.MILLI_OF_DAY)"); + } + + public void testMillisOfSecond() { + assertMethodDeprecation(() -> assertThat(javaTime.getMillisOfSecond(), equalTo(jodaTime.getMillisOfSecond())), + "getMillisOfSecond()", "get(ChronoField.MILLI_OF_SECOND)"); + } + + public void testMinuteOfDay() { + assertMethodDeprecation(() -> assertThat(javaTime.getMinuteOfDay(), equalTo(jodaTime.getMinuteOfDay())), + "getMinuteOfDay()", "get(ChronoField.MINUTE_OF_DAY)"); + } + + public void testMinuteOfHour() { + assertMethodDeprecation(() -> assertThat(javaTime.getMinuteOfHour(), equalTo(jodaTime.getMinuteOfHour())), + "getMinuteOfHour()", "getMinute()"); + } + + public void testMonthOfYear() { + assertMethodDeprecation(() -> assertThat(javaTime.getMonthOfYear(), equalTo(jodaTime.getMonthOfYear())), + "getMonthOfYear()", "getMonthValue()"); + } + + public void testSecondOfDay() { + assertMethodDeprecation(() -> assertThat(javaTime.getSecondOfDay(), equalTo(jodaTime.getSecondOfDay())), + "getSecondOfDay()", "get(ChronoField.SECOND_OF_DAY)"); + } + + public void testSecondOfMinute() { + assertMethodDeprecation(() -> 
assertThat(javaTime.getSecondOfMinute(), equalTo(jodaTime.getSecondOfMinute())), + "getSecondOfMinute()", "getSecond()"); + } + + public void testWeekOfWeekyear() { + assertMethodDeprecation(() -> assertThat(javaTime.getWeekOfWeekyear(), equalTo(jodaTime.getWeekOfWeekyear())), + "getWeekOfWeekyear()", "get(WeekFields.ISO.weekOfWeekBasedYear())"); + } + + public void testWeekyear() { + assertMethodDeprecation(() -> assertThat(javaTime.getWeekyear(), equalTo(jodaTime.getWeekyear())), + "getWeekyear()", "get(WeekFields.ISO.weekBasedYear())"); + } + + public void testYearOfCentury() { + assertMethodDeprecation(() -> assertThat(javaTime.getYearOfCentury(), equalTo(jodaTime.getYearOfCentury())), + "getYearOfCentury()", "get(ChronoField.YEAR_OF_ERA) % 100"); + } + + public void testYearOfEra() { + assertMethodDeprecation(() -> assertThat(javaTime.getYearOfEra(), equalTo(jodaTime.getYearOfEra())), + "getYearOfEra()", "get(ChronoField.YEAR_OF_ERA)"); + } + + public void testToString1() { + assertMethodDeprecation(() -> assertThat(javaTime.toString("YYYY/MM/dd HH:mm:ss.SSS"), + equalTo(jodaTime.toString("YYYY/MM/dd HH:mm:ss.SSS"))), "toString(String)", "a DateTimeFormatter"); + } + + public void testToString2() { + assertMethodDeprecation(() -> assertThat(javaTime.toString("EEE", Locale.GERMANY), + equalTo(jodaTime.toString("EEE", Locale.GERMANY))), "toString(String,Locale)", "a DateTimeFormatter"); + } + + public void testDayOfWeek() { + assertDeprecation(() -> assertThat(javaTime.getDayOfWeek(), equalTo(jodaTime.getDayOfWeek())), + "The return type of [getDayOfWeek()] will change to an enum in 7.0. Use getDayOfWeekEnum().getValue()."); + } + + public void testDayOfWeekEnum() { + assertThat(javaTime.getDayOfWeekEnum(), equalTo(DayOfWeek.of(jodaTime.getDayOfWeek()))); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 45b6340ba6f..4bd12c97734 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -47,10 +47,13 @@ import org.elasticsearch.search.lookup.FieldLookup; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.ReadableDateTime; +import org.joda.time.format.DateTimeFormat; import java.time.ZoneOffset; import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; @@ -59,7 +62,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -111,7 +113,7 @@ public class SearchFieldsIT extends ESIntegTestCase { scripts.put("doc['date'].date.millis", vars -> { Map doc = (Map) vars.get("doc"); ScriptDocValues.Dates dates = (ScriptDocValues.Dates) doc.get("date"); - return ((ZonedDateTime) dates.getValue()).toInstant().toEpochMilli(); + return dates.getValue().toInstant().toEpochMilli(); }); scripts.put("_fields['num1'].value", vars -> fieldsScript(vars, "num1")); @@ -801,8 +803,8 @@ public class SearchFieldsIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), 
equalTo((Object) 4L)); assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); - ZonedDateTime dateField = searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(); - assertThat(dateField.toInstant().toEpochMilli(), equalTo(date.toInstant().toEpochMilli())); + DateTime dateField = searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(); + assertThat(dateField.getMillis(), equalTo(date.toInstant().toEpochMilli())); assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); @@ -828,7 +830,7 @@ public class SearchFieldsIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); dateField = searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(); - assertThat(dateField.toInstant().toEpochMilli(), equalTo(date.toInstant().toEpochMilli())); + assertThat(dateField.getMillis(), equalTo(date.toInstant().toEpochMilli())); assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); @@ -968,10 +970,10 @@ public class SearchFieldsIT extends ESIntegTestCase { assertAcked(prepareCreate("test").addMapping("type", mapping)); ensureGreen("test"); - ZonedDateTime date = ZonedDateTime.of(1990, 12, 29, 0, 0, 0, 0, ZoneOffset.UTC); - DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ROOT); + DateTime date = new DateTime(1990, 12, 29, 0, 0, DateTimeZone.UTC); + org.joda.time.format.DateTimeFormatter formatter = DateTimeFormat.forPattern("yyyy-MM-dd"); - index("test", "type", "1", "text_field", "foo", "date_field", formatter.format(date)); + index("test", "type", "1", "text_field", "foo", "date_field", formatter.print(date)); refresh("test"); SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery()) @@ -999,8 +1001,8 @@ public class SearchFieldsIT extends ESIntegTestCase { DocumentField dateField = fields.get("date_field"); assertThat(dateField.getName(), equalTo("date_field")); - ZonedDateTime fetchedDate = dateField.getValue(); - assertThat(fetchedDate, equalTo(date)); + ReadableDateTime fetchedDate = dateField.getValue(); + assertThat(fetchedDate.getMillis(), equalTo(date.toInstant().getMillis())); } public void testWildcardDocValueFieldsWithFieldAlias() throws Exception { @@ -1033,10 +1035,10 @@ public class SearchFieldsIT extends ESIntegTestCase { assertAcked(prepareCreate("test").addMapping("type", mapping)); ensureGreen("test"); - ZonedDateTime date = ZonedDateTime.of(1990, 12, 29, 0, 0, 0, 0, ZoneOffset.UTC); - DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ROOT); + DateTime date = new DateTime(1990, 12, 29, 0, 0, DateTimeZone.UTC); + org.joda.time.format.DateTimeFormatter formatter = 
DateTimeFormat.forPattern("yyyy-MM-dd"); - index("test", "type", "1", "text_field", "foo", "date_field", formatter.format(date)); + index("test", "type", "1", "text_field", "foo", "date_field", formatter.print(date)); refresh("test"); SearchRequestBuilder builder = client().prepareSearch().setQuery(matchAllQuery()) @@ -1063,8 +1065,8 @@ public class SearchFieldsIT extends ESIntegTestCase { DocumentField dateField = fields.get("date_field"); assertThat(dateField.getName(), equalTo("date_field")); - ZonedDateTime fetchedDate = dateField.getValue(); - assertThat(fetchedDate, equalTo(date)); + ReadableDateTime fetchedDate = dateField.getValue(); + assertThat(fetchedDate.getMillis(), equalTo(date.toInstant().getMillis())); } From 481f8a9a0766e82fb8f0d0bef30ce6e5ca91cd33 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 17 Sep 2018 07:29:00 +0200 Subject: [PATCH 09/27] [CCR] Make auto follow patterns work with security (#33501) Relates to #33007 --- .../qa/multi-cluster-with-security/roles.yml | 2 +- .../xpack/ccr/FollowIndexSecurityIT.java | 79 ++++++++++++++++--- .../xpack/ccr/CcrLicenseChecker.java | 45 ++++++++++- .../ccr/action/AutoFollowCoordinator.java | 47 ++++++++--- .../ccr/action/ShardFollowTasksExecutor.java | 45 ++--------- .../TransportPutAutoFollowPatternAction.java | 11 ++- .../xpack/ccr/AutoFollowMetadataTests.java | 16 +++- .../elasticsearch/xpack/ccr/CcrLicenseIT.java | 2 +- .../action/AutoFollowCoordinatorTests.java | 61 +++++++++----- ...ortDeleteAutoFollowPatternActionTests.java | 6 +- ...nsportPutAutoFollowPatternActionTests.java | 8 +- .../xpack/core/ccr/AutoFollowMetadata.java | 34 ++++++-- 12 files changed, 252 insertions(+), 104 deletions(-) diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml b/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml index 7916bc6eee2..8320143a9fb 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml @@ -2,7 +2,7 @@ ccruser: cluster: - manage_ccr indices: - - names: [ 'allowed-index' ] + - names: [ 'allowed-index', 'logs-eu-*' ] privileges: - monitor - read diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java index 851a292ddae..60b9f8f23e8 100644 --- a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.ccr; +import org.apache.http.HttpHost; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -119,6 +120,45 @@ public class FollowIndexSecurityIT extends ESRestTestCase { } } + public void testAutoFollowPatterns() throws Exception { + assumeFalse("Test should only run when both clusters are running", runningAgainstLeaderCluster); + String allowedIndex = "logs-eu-20190101"; + String disallowedIndex = "logs-us-20190101"; + + Request request = new Request("PUT", "/_ccr/auto_follow/leader_cluster"); + request.setJsonEntity("{\"leader_index_patterns\": [\"logs-*\"]}"); + assertOK(client().performRequest(request)); + + try (RestClient leaderClient = buildLeaderClient()) { + for (String index : new String[]{allowedIndex, 
disallowedIndex}) { + Settings settings = Settings.builder() + .put("index.soft_deletes.enabled", true) + .build(); + String requestBody = "{\"settings\": " + Strings.toString(settings) + + ", \"mappings\": {\"_doc\": {\"properties\": {\"field\": {\"type\": \"keyword\"}}}} }"; + request = new Request("PUT", "/" + index); + request.setJsonEntity(requestBody); + assertOK(leaderClient.performRequest(request)); + + for (int i = 0; i < 5; i++) { + String id = Integer.toString(i); + index(leaderClient, index, id, "field", i, "filtered_field", "true"); + } + } + } + + assertBusy(() -> { + ensureYellow(allowedIndex); + verifyDocuments(adminClient(), allowedIndex, 5); + }); + assertThat(indexExists(adminClient(), disallowedIndex), is(false)); + + // Cleanup by deleting auto follow pattern and unfollowing: + request = new Request("DELETE", "/_ccr/auto_follow/leader_cluster"); + assertOK(client().performRequest(request)); + unfollowIndex(allowedIndex); + } + private int countCcrNodeTasks() throws IOException { final Request request = new Request("GET", "/_tasks"); request.addParameter("detailed", "true"); @@ -139,6 +179,10 @@ public class FollowIndexSecurityIT extends ESRestTestCase { } private static void index(String index, String id, Object... fields) throws IOException { + index(adminClient(), index, id, fields); + } + + private static void index(RestClient client, String index, String id, Object... fields) throws IOException { XContentBuilder document = jsonBuilder().startObject(); for (int i = 0; i < fields.length; i += 2) { document.field((String) fields[i], fields[i + 1]); @@ -146,7 +190,7 @@ public class FollowIndexSecurityIT extends ESRestTestCase { document.endObject(); final Request request = new Request("POST", "/" + index + "/_doc/" + id); request.setJsonEntity(Strings.toString(document)); - assertOK(adminClient().performRequest(request)); + assertOK(client.performRequest(request)); } private static void refresh(String index) throws IOException { @@ -201,11 +245,34 @@ public class FollowIndexSecurityIT extends ESRestTestCase { assertOK(adminClient().performRequest(request)); } + private static void ensureYellow(String index) throws IOException { + Request request = new Request("GET", "/_cluster/health/" + index); + request.addParameter("wait_for_status", "yellow"); + request.addParameter("wait_for_no_relocating_shards", "true"); + request.addParameter("wait_for_no_initializing_shards", "true"); + request.addParameter("timeout", "70s"); + request.addParameter("level", "shards"); + adminClient().performRequest(request); + } + + private RestClient buildLeaderClient() throws IOException { + assert runningAgainstLeaderCluster == false; + String leaderUrl = System.getProperty("tests.leader_host"); + int portSeparator = leaderUrl.lastIndexOf(':'); + HttpHost httpHost = new HttpHost(leaderUrl.substring(0, portSeparator), + Integer.parseInt(leaderUrl.substring(portSeparator + 1)), getProtocol()); + return buildClient(restAdminSettings(), new HttpHost[]{httpHost}); + } + private static boolean indexExists(RestClient client, String index) throws IOException { Response response = client.performRequest(new Request("HEAD", "/" + index)); return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); } + private static void unfollowIndex(String followIndex) throws IOException { + assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/unfollow"))); + } + private static void verifyCcrMonitoring(String expectedLeaderIndex, String expectedFollowerIndex) throws 
IOException { ensureYellow(".monitoring-*"); @@ -239,14 +306,4 @@ public class FollowIndexSecurityIT extends ESRestTestCase { assertThat(numberOfOperationsIndexed, greaterThanOrEqualTo(1)); } - private static void ensureYellow(String index) throws IOException { - Request request = new Request("GET", "/_cluster/health/" + index); - request.addParameter("wait_for_status", "yellow"); - request.addParameter("wait_for_no_relocating_shards", "true"); - request.addParameter("wait_for_no_initializing_shards", "true"); - request.addParameter("timeout", "70s"); - request.addParameter("level", "shards"); - adminClient().performRequest(request); - } - } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java index 2161d0a1423..c0000725887 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -7,17 +7,23 @@ package org.elasticsearch.xpack.ccr; import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; @@ -25,15 +31,19 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.license.RemoteClusterLicenseChecker; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.ccr.action.ShardFollowTask; import org.elasticsearch.xpack.core.XPackPlugin; import java.util.Collections; import java.util.Locale; +import java.util.Map; import java.util.Objects; import java.util.function.BiConsumer; import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; /** * Encapsulates licensing checking for CCR. 
@@ -93,6 +103,7 @@ public final class CcrLicenseChecker { request.indices(leaderIndex); checkRemoteClusterLicenseAndFetchClusterState( client, + Collections.emptyMap(), clusterAlias, request, onFailure, @@ -115,6 +126,7 @@ public final class CcrLicenseChecker { * * @param client the client * @param clusterAlias the remote cluster alias + * @param headers the headers to use for leader client * @param request the cluster state request * @param onFailure the failure consumer * @param leaderClusterStateConsumer the leader cluster state consumer @@ -122,12 +134,14 @@ public final class CcrLicenseChecker { */ public void checkRemoteClusterLicenseAndFetchClusterState( final Client client, + final Map headers, final String clusterAlias, final ClusterStateRequest request, final Consumer onFailure, final Consumer leaderClusterStateConsumer) { checkRemoteClusterLicenseAndFetchClusterState( client, + headers, clusterAlias, request, onFailure, @@ -144,6 +158,7 @@ public final class CcrLicenseChecker { * * @param client the client * @param clusterAlias the remote cluster alias + * @param headers the headers to use for leader client * @param request the cluster state request * @param onFailure the failure consumer * @param leaderClusterStateConsumer the leader cluster state consumer @@ -153,6 +168,7 @@ public final class CcrLicenseChecker { */ private void checkRemoteClusterLicenseAndFetchClusterState( final Client client, + final Map headers, final String clusterAlias, final ClusterStateRequest request, final Consumer onFailure, @@ -167,7 +183,7 @@ public final class CcrLicenseChecker { @Override public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { if (licenseCheck.isSuccess()) { - final Client leaderClient = client.getRemoteClusterClient(clusterAlias); + final Client leaderClient = wrapClient(client.getRemoteClusterClient(clusterAlias), headers); final ActionListener clusterStateListener = ActionListener.wrap(s -> leaderClusterStateConsumer.accept(s.getState()), onFailure); // following an index in remote cluster, so use remote client to fetch leader index metadata @@ -237,6 +253,33 @@ public final class CcrLicenseChecker { leaderClient.admin().indices().stats(request, ActionListener.wrap(indicesStatsHandler, onFailure)); } + public static Client wrapClient(Client client, Map headers) { + if (headers.isEmpty()) { + return client; + } else { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + Map filteredHeaders = headers.entrySet().stream() + .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + return new FilterClient(client) { + @Override + protected + void doExecute(Action action, Request request, ActionListener listener) { + final Supplier supplier = threadContext.newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithHeaders(threadContext, filteredHeaders)) { + super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); + } + } + }; + } + } + + private static ThreadContext.StoredContext stashWithHeaders(ThreadContext threadContext, Map headers) { + final ThreadContext.StoredContext storedContext = threadContext.stashContext(); + threadContext.copyHeaders(headers.entrySet()); + return storedContext; + } + private static ElasticsearchStatusException indexMetadataNonCompliantRemoteLicense( final String leaderIndex, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { final String 
clusterAlias = licenseCheck.remoteClusterLicenseInfo().clusterAlias(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 722cbddde18..180e5e37990 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -103,19 +103,22 @@ public class AutoFollowCoordinator implements ClusterStateApplier { AutoFollower operation = new AutoFollower(handler, followerClusterState) { @Override - void getLeaderClusterState(final String leaderClusterAlias, final BiConsumer handler) { + void getLeaderClusterState(final Map headers, + final String leaderClusterAlias, + final BiConsumer handler) { final ClusterStateRequest request = new ClusterStateRequest(); request.clear(); request.metaData(true); if ("_local_".equals(leaderClusterAlias)) { + Client client = CcrLicenseChecker.wrapClient(AutoFollowCoordinator.this.client, headers); client.admin().cluster().state( request, ActionListener.wrap(r -> handler.accept(r.getState(), null), e -> handler.accept(null, e))); } else { - final Client leaderClient = client.getRemoteClusterClient(leaderClusterAlias); // TODO: set non-compliant status on auto-follow coordination that can be viewed via a stats API ccrLicenseChecker.checkRemoteClusterLicenseAndFetchClusterState( - leaderClient, + client, + headers, leaderClusterAlias, request, e -> handler.accept(null, e), @@ -125,15 +128,22 @@ public class AutoFollowCoordinator implements ClusterStateApplier { } @Override - void createAndFollow(FollowIndexAction.Request followRequest, + void createAndFollow(Map headers, + FollowIndexAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { - client.execute(CreateAndFollowIndexAction.INSTANCE, new CreateAndFollowIndexAction.Request(followRequest), - ActionListener.wrap(r -> successHandler.run(), failureHandler)); + Client followerClient = CcrLicenseChecker.wrapClient(client, headers); + CreateAndFollowIndexAction.Request request = new CreateAndFollowIndexAction.Request(followRequest); + followerClient.execute( + CreateAndFollowIndexAction.INSTANCE, + request, + ActionListener.wrap(r -> successHandler.run(), failureHandler) + ); } @Override - void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { + void updateAutoFollowMetadata(Function updateFunction, + Consumer handler) { clusterService.submitStateUpdateTask("update_auto_follow_metadata", new ClusterStateUpdateTask() { @Override @@ -188,7 +198,7 @@ public class AutoFollowCoordinator implements ClusterStateApplier { AutoFollowPattern autoFollowPattern = entry.getValue(); List followedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(clusterAlias); - getLeaderClusterState(clusterAlias, (leaderClusterState, e) -> { + getLeaderClusterState(autoFollowPattern.getHeaders(), clusterAlias, (leaderClusterState, e) -> { if (leaderClusterState != null) { assert e == null; handleClusterAlias(clusterAlias, autoFollowPattern, followedIndices, leaderClusterState); @@ -251,7 +261,7 @@ public class AutoFollowCoordinator implements ClusterStateApplier { finalise(followError); } }; - createAndFollow(followRequest, successHandler, failureHandler); + createAndFollow(autoFollowPattern.getHeaders(), followRequest, successHandler, failureHandler); } } } @@ -314,14 +324,27 @@ public class 
AutoFollowCoordinator implements ClusterStateApplier { /** * Fetch the cluster state from the leader with the specified cluster alias * + * @param headers the client headers * @param leaderClusterAlias the cluster alias of the leader * @param handler the callback to invoke */ - abstract void getLeaderClusterState(String leaderClusterAlias, BiConsumer handler); + abstract void getLeaderClusterState( + Map headers, + String leaderClusterAlias, + BiConsumer handler + ); - abstract void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer failureHandler); + abstract void createAndFollow( + Map headers, + FollowIndexAction.Request followRequest, + Runnable successHandler, + Consumer failureHandler + ); - abstract void updateAutoFollowMetadata(Function updateFunction, Consumer handler); + abstract void updateAutoFollowMetadata( + Function updateFunction, + Consumer handler + ); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 8e1c1a27a36..714e1fa289f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -5,18 +5,13 @@ */ package org.elasticsearch.xpack.ccr.action; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.client.Client; -import org.elasticsearch.client.FilterClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -24,7 +19,6 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.Index; import org.elasticsearch.index.seqno.SeqNoStats; @@ -48,8 +42,8 @@ import java.util.Optional; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; -import java.util.function.Supplier; -import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.ccr.CcrLicenseChecker.wrapClient; public class ShardFollowTasksExecutor extends PersistentTasksExecutor { @@ -86,11 +80,11 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor scheduler = (delay, command) -> { try { threadPool.schedule(delay, Ccr.CCR_THREAD_POOL_NAME, command); @@ -160,7 +154,7 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor filteredHeaders = shardFollowTask.getHeaders().entrySet().stream() - .filter(e -> 
ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - return new FilterClient(client) { - @Override - protected - void doExecute(Action action, Request request, ActionListener listener) { - final Supplier supplier = threadContext.newRestorableContext(false); - try (ThreadContext.StoredContext ignore = stashWithHeaders(threadContext, filteredHeaders)) { - super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); - } - } - }; - } - } - - private static ThreadContext.StoredContext stashWithHeaders(ThreadContext threadContext, Map headers) { - final ThreadContext.StoredContext storedContext = threadContext.stashContext(); - threadContext.copyHeaders(headers.entrySet()); - return storedContext; - } - } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index 4afd51f56e6..748ba03f034 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -87,6 +87,10 @@ public class TransportPutAutoFollowPatternAction extends clusterStateRequest.clear(); clusterStateRequest.metaData(true); + Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + leaderClient.admin().cluster().state( clusterStateRequest, ActionListener.wrap( @@ -102,7 +106,7 @@ public class TransportPutAutoFollowPatternAction extends @Override public ClusterState execute(ClusterState currentState) throws Exception { - return innerPut(request, currentState, leaderClusterState); + return innerPut(request, filteredHeaders, currentState, leaderClusterState); } }); }, @@ -110,6 +114,7 @@ public class TransportPutAutoFollowPatternAction extends } static ClusterState innerPut(PutAutoFollowPatternAction.Request request, + Map filteredHeaders, ClusterState localState, ClusterState leaderClusterState) { // auto patterns are always overwritten @@ -151,8 +156,8 @@ public class TransportPutAutoFollowPatternAction extends request.getMaxConcurrentWriteBatches(), request.getMaxWriteBufferSize(), request.getMaxRetryDelay(), - request.getIdleShardRetryDelay() - ); + request.getIdleShardRetryDelay(), + filteredHeaders); patterns.put(request.getLeaderClusterAlias(), autoFollowPattern); ClusterState.Builder newState = ClusterState.builder(localState); newState.metaData(MetaData.builder(localState.getMetaData()) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java index cc617abc385..5ef7b4093ae 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/AutoFollowMetadataTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -37,10 +38,17 @@ public class AutoFollowMetadataTests extends 
AbstractSerializingTestCase> followedLeaderIndices = new HashMap<>(numEntries); for (int i = 0; i < numEntries; i++) { List leaderPatterns = Arrays.asList(generateRandomStringArray(4, 4, false)); - AutoFollowMetadata.AutoFollowPattern autoFollowPattern = - new AutoFollowMetadata.AutoFollowPattern(leaderPatterns, randomAlphaOfLength(4), randomIntBetween(0, Integer.MAX_VALUE), - randomIntBetween(0, Integer.MAX_VALUE), randomNonNegativeLong(), randomIntBetween(0, Integer.MAX_VALUE), - randomIntBetween(0, Integer.MAX_VALUE), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(500)); + AutoFollowMetadata.AutoFollowPattern autoFollowPattern = new AutoFollowMetadata.AutoFollowPattern( + leaderPatterns, + randomAlphaOfLength(4), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomNonNegativeLong(), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + TimeValue.timeValueMillis(500), + TimeValue.timeValueMillis(500), + randomBoolean() ? null : Collections.singletonMap("key", "value")); configs.put(Integer.toString(i), autoFollowPattern); followedLeaderIndices.put(Integer.toString(i), Arrays.asList(generateRandomStringArray(4, 4, false))); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java index d8bf2872547..1e7e3fe42df 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -140,7 +140,7 @@ public class CcrLicenseIT extends ESSingleNodeTestCase { @Override public ClusterState execute(ClusterState currentState) throws Exception { AutoFollowPattern autoFollowPattern = - new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null); AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata( Collections.singletonMap("test_alias", autoFollowPattern), Collections.emptyMap() diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 5ab11cf5b0c..31af326250c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -51,7 +51,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase { .build(); AutoFollowPattern autoFollowPattern = - new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -69,19 +69,25 @@ public class AutoFollowCoordinatorTests extends ESTestCase { }; AutoFollower autoFollower = new AutoFollower(handler, currentState) { @Override - void getLeaderClusterState(String leaderClusterAlias, BiConsumer handler) { + void getLeaderClusterState(Map headers, + String leaderClusterAlias, + BiConsumer handler) { handler.accept(leaderState, null); } @Override - void createAndFollow(FollowIndexAction.Request 
followRequest, Runnable successHandler, Consumer failureHandler) { + void createAndFollow(Map headers, + FollowIndexAction.Request followRequest, + Runnable successHandler, + Consumer failureHandler) { assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101")); assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101")); successHandler.run(); } @Override - void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { + void updateAutoFollowMetadata(Function updateFunction, + Consumer handler) { ClusterState resultCs = updateFunction.apply(currentState); AutoFollowMetadata result = resultCs.metaData().custom(AutoFollowMetadata.TYPE); assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); @@ -98,7 +104,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase { when(client.getRemoteClusterClient(anyString())).thenReturn(client); AutoFollowPattern autoFollowPattern = - new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -116,17 +122,23 @@ public class AutoFollowCoordinatorTests extends ESTestCase { }; AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override - void getLeaderClusterState(String leaderClusterAlias, BiConsumer handler) { + void getLeaderClusterState(Map headers, + String leaderClusterAlias, + BiConsumer handler) { handler.accept(null, failure); } @Override - void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { + void createAndFollow(Map headers, + FollowIndexAction.Request followRequest, + Runnable successHandler, + Consumer failureHandler) { fail("should not get here"); } @Override - void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { + void updateAutoFollowMetadata(Function updateFunction, + Consumer handler) { fail("should not get here"); } }; @@ -146,7 +158,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase { .build(); AutoFollowPattern autoFollowPattern = - new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -164,12 +176,17 @@ public class AutoFollowCoordinatorTests extends ESTestCase { }; AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override - void getLeaderClusterState(String leaderClusterAlias, BiConsumer handler) { + void getLeaderClusterState(Map headers, + String leaderClusterAlias, + BiConsumer handler) { handler.accept(leaderState, null); } @Override - void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { + void createAndFollow(Map headers, + FollowIndexAction.Request followRequest, + Runnable successHandler, + Consumer failureHandler) { assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101")); assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101")); successHandler.run(); @@ -196,7 +213,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase { .build(); AutoFollowPattern 
autoFollowPattern = - new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null); + new AutoFollowPattern(Collections.singletonList("logs-*"), null, null, null, null, null, null, null, null, null); Map patterns = new HashMap<>(); patterns.put("remote", autoFollowPattern); Map> followedLeaderIndexUUIDS = new HashMap<>(); @@ -214,19 +231,25 @@ public class AutoFollowCoordinatorTests extends ESTestCase { }; AutoFollower autoFollower = new AutoFollower(handler, followerState) { @Override - void getLeaderClusterState(String leaderClusterAlias, BiConsumer handler) { + void getLeaderClusterState(Map headers, + String leaderClusterAlias, + BiConsumer handler) { handler.accept(leaderState, null); } @Override - void createAndFollow(FollowIndexAction.Request followRequest, Runnable successHandler, Consumer failureHandler) { + void createAndFollow(Map headers, + FollowIndexAction.Request followRequest, + Runnable successHandler, + Consumer failureHandler) { assertThat(followRequest.getLeaderIndex(), equalTo("remote:logs-20190101")); assertThat(followRequest.getFollowerIndex(), equalTo("logs-20190101")); failureHandler.accept(failure); } @Override - void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { + void updateAutoFollowMetadata(Function updateFunction, + Consumer handler) { fail("should not get here"); } }; @@ -236,7 +259,7 @@ public class AutoFollowCoordinatorTests extends ESTestCase { public void testGetLeaderIndicesToFollow() { AutoFollowPattern autoFollowPattern = - new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null, null, null, null, null, null, null); + new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null, null, null, null, null, null, null, null); ClusterState followerState = ClusterState.builder(new ClusterName("remote")) .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(Collections.singletonMap("remote", autoFollowPattern), Collections.emptyMap()))) @@ -282,15 +305,15 @@ public class AutoFollowCoordinatorTests extends ESTestCase { public void testGetFollowerIndexName() { AutoFollowPattern autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), null, null, - null, null, null, null, null, null); + null, null, null, null, null, null, null); assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("metrics-0")); autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), "eu-metrics-0", null, null, - null, null, null, null, null); + null, null, null, null, null, null); assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); autoFollowPattern = new AutoFollowPattern(Collections.singletonList("metrics-*"), "eu-{{leader_index}}", null, - null, null, null, null, null, null); + null, null, null, null, null, null, null); assertThat(AutoFollower.getFollowerIndexName(autoFollowPattern, "metrics-0"), equalTo("eu-metrics-0")); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java index 303133d3d82..2525b63de31 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java +++ 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java @@ -30,7 +30,7 @@ public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase { List existingPatterns = new ArrayList<>(); existingPatterns.add("transactions-*"); existingAutoFollowPatterns.put("eu_cluster", - new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null)); + new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null, null)); List existingUUIDS = new ArrayList<>(); existingUUIDS.add("_val"); @@ -40,7 +40,7 @@ public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase { List existingPatterns = new ArrayList<>(); existingPatterns.add("logs-*"); existingAutoFollowPatterns.put("asia_cluster", - new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null)); + new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null, null)); List existingUUIDS = new ArrayList<>(); existingUUIDS.add("_val"); @@ -69,7 +69,7 @@ public class TransportDeleteAutoFollowPatternActionTests extends ESTestCase { List existingPatterns = new ArrayList<>(); existingPatterns.add("transactions-*"); existingAutoFollowPatterns.put("eu_cluster", - new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null)); + new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null, null)); } ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster")) .metaData(MetaData.builder().putCustom(AutoFollowMetadata.TYPE, diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java index 6e7341154c8..5731a64ba89 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java @@ -39,7 +39,7 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase { .metaData(MetaData.builder()) .build(); - ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState); + ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, null, localState, remoteState); AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); @@ -78,7 +78,7 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase { .metaData(mdBuilder) .build(); - ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState); + ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, null, localState, remoteState); AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); @@ -97,7 +97,7 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase { List existingPatterns = new ArrayList<>(); existingPatterns.add("transactions-*"); 
existingAutoFollowPatterns.put("eu_cluster", - new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null)); + new AutoFollowMetadata.AutoFollowPattern(existingPatterns, null, null, null, null, null, null, null, null, null)); Map> existingAlreadyFollowedIndexUUIDS = new HashMap<>(); List existingUUIDS = new ArrayList<>(); existingUUIDS.add("_val"); @@ -120,7 +120,7 @@ public class TransportPutAutoFollowPatternActionTests extends ESTestCase { .metaData(mdBuilder) .build(); - ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, localState, remoteState); + ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, null, localState, remoteState); AutoFollowMetadata autoFollowMetadata = result.metaData().custom(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index 9c64ea3da76..cc4ea7b009e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.security.xcontent.XContentUtils; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.List; @@ -171,12 +172,14 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); public static final ParseField MAX_RETRY_DELAY = new ParseField("max_retry_delay"); public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + private static final ParseField HEADERS = new ParseField("headers"); @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("auto_follow_pattern", args -> new AutoFollowPattern((List) args[0], (String) args[1], (Integer) args[2], (Integer) args[3], - (Long) args[4], (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8])); + (Long) args[4], (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8], + (Map) args[9])); static { PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), LEADER_PATTERNS_FIELD); @@ -192,6 +195,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); } private final List leaderIndexPatterns; @@ -203,10 +207,18 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i private final Integer maxWriteBufferSize; private final TimeValue maxRetryDelay; private final TimeValue idleShardRetryDelay; + private final Map headers; - public AutoFollowPattern(List leaderIndexPatterns, String followIndexPattern, Integer maxBatchOperationCount, - Integer maxConcurrentReadBatches, Long maxOperationSizeInBytes, Integer maxConcurrentWriteBatches, - Integer maxWriteBufferSize, TimeValue 
maxRetryDelay, TimeValue idleShardRetryDelay) { + public AutoFollowPattern(List leaderIndexPatterns, + String followIndexPattern, + Integer maxBatchOperationCount, + Integer maxConcurrentReadBatches, + Long maxOperationSizeInBytes, + Integer maxConcurrentWriteBatches, + Integer maxWriteBufferSize, + TimeValue maxRetryDelay, + TimeValue idleShardRetryDelay, + Map headers) { this.leaderIndexPatterns = leaderIndexPatterns; this.followIndexPattern = followIndexPattern; this.maxBatchOperationCount = maxBatchOperationCount; @@ -216,6 +228,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i this.maxWriteBufferSize = maxWriteBufferSize; this.maxRetryDelay = maxRetryDelay; this.idleShardRetryDelay = idleShardRetryDelay; + this.headers = headers != null ? Collections.unmodifiableMap(headers) : Collections.emptyMap(); } AutoFollowPattern(StreamInput in) throws IOException { @@ -228,6 +241,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i maxWriteBufferSize = in.readOptionalVInt(); maxRetryDelay = in.readOptionalTimeValue(); idleShardRetryDelay = in.readOptionalTimeValue(); + this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); } public boolean match(String indexName) { @@ -274,6 +288,10 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i return idleShardRetryDelay; } + public Map getHeaders() { + return headers; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringList(leaderIndexPatterns); @@ -285,6 +303,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i out.writeOptionalVInt(maxWriteBufferSize); out.writeOptionalTimeValue(maxRetryDelay); out.writeOptionalTimeValue(idleShardRetryDelay); + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); } @Override @@ -314,6 +333,7 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i if (idleShardRetryDelay != null) { builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay); } + builder.field(HEADERS.getPreferredName(), headers); return builder; } @@ -335,7 +355,8 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i Objects.equals(maxConcurrentWriteBatches, that.maxConcurrentWriteBatches) && Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay); + Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay) && + Objects.equals(headers, that.headers); } @Override @@ -349,7 +370,8 @@ public class AutoFollowMetadata extends AbstractNamedDiffable i maxConcurrentWriteBatches, maxWriteBufferSize, maxRetryDelay, - idleShardRetryDelay + idleShardRetryDelay, + headers ); } } From 34379887b4734f713db0e01b48835c5e7384d9ad Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 17 Sep 2018 07:51:34 +0200 Subject: [PATCH 10/27] Make custom index metadata completely immutable (#33735) Currently `IndexMetadata#getCustomData(...)` wraps the custom metadata in an unmodifiable map, but if there is no entry for the specified key then an NPE is thrown by Collections.unmodifiableMap(...). This is not ideal when callers want to throw an exception with a specific message (like in the case of CCR, to indicate that the follow index was not created by the create_and_follow API and is therefore incompatible as a follow index).
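To make the failure mode concrete, here is a minimal sketch of the caller's view (hypothetical caller code; `indexMetaData` and the exception message are illustrative, and only the getter's old body is quoted from the IndexMetaData diff in this patch):

    // Before this change, IndexMetaData#getCustomData(key) did:
    //     return Collections.unmodifiableMap(this.customData.get(key));
    // Collections.unmodifiableMap(null) throws a bare NullPointerException when
    // the key has no entry, so the caller below could never reach its own check:
    Map<String, String> ccrCustomData = indexMetaData.getCustomData("ccr");
    if (ccrCustomData == null) {
        // reachable only now that the getter returns null instead of throwing
        throw new IllegalArgumentException("follow index was not created by the create_and_follow API");
    }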
I think making `DiffableStringMap` itself immutable is better than just wrapping custom metadata with `Collections.unmodifiableMap(...)` in all methods that access it. Also removed the `equals()`, `hashCode()` and `toString()` methods of `DiffableStringMap`, because `AbstractMap` already implements these methods. --- .../cluster/metadata/DiffableStringMap.java | 35 ++----------------- .../cluster/metadata/IndexMetaData.java | 2 +- .../metadata/DiffableStringMapTests.java | 4 +-- 3 files changed, 5 insertions(+), 36 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java index 4aa429f5704..46433eed8a6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java @@ -42,17 +42,12 @@ public class DiffableStringMap extends AbstractMap implements Di private final Map innerMap; DiffableStringMap(final Map map) { - this.innerMap = map; + this.innerMap = Collections.unmodifiableMap(map); } @SuppressWarnings("unchecked") DiffableStringMap(final StreamInput in) throws IOException { - this.innerMap = (Map) (Map) in.readMap(); - } - - @Override - public String put(String key, String value) { - return innerMap.put(key, value); + this((Map) (Map) in.readMap()); } @Override @@ -75,32 +70,6 @@ public class DiffableStringMap extends AbstractMap implements Di return new DiffableStringMapDiff(in); } - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (obj instanceof DiffableStringMap) { - DiffableStringMap other = (DiffableStringMap) obj; - return innerMap.equals(other.innerMap); - } else if (obj instanceof Map) { - Map other = (Map) obj; - return innerMap.equals(other); - } else { - return false; - } - } - - @Override - public int hashCode() { - return innerMap.hashCode(); - } - - @Override - public String toString() { - return "DiffableStringMap[" + innerMap.toString() + "]"; - } - /** * Represents differences between two DiffableStringMaps.
*/ diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 54089abae7e..c1e70191417 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -466,7 +466,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen } public Map getCustomData(final String key) { - return Collections.unmodifiableMap(this.customData.get(key)); + return this.customData.get(key); } public ImmutableOpenIntMap> getInSyncAllocationIds() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java index 341022030b3..58d03f10a4e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DiffableStringMapTests.java @@ -70,7 +70,7 @@ public class DiffableStringMapTests extends ESTestCase { m.put("2", "2"); m.put("3", "3"); DiffableStringMap dsm = new DiffableStringMap(m); - DiffableStringMap expected = new DiffableStringMap(m); + Map expected = new HashMap<>(m); for (int i = 0; i < randomIntBetween(5, 50); i++) { if (randomBoolean() && expected.size() > 1) { @@ -80,7 +80,7 @@ public class DiffableStringMapTests extends ESTestCase { } else { expected.put(randomAlphaOfLength(2), randomAlphaOfLength(4)); } - dsm = expected.diff(dsm).apply(dsm); + dsm = new DiffableStringMap(expected).diff(dsm).apply(dsm); } assertThat(expected, equalTo(dsm)); } From e77835c6f5c143e0f8c4a8192528fb70b6c801df Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 17 Sep 2018 09:10:23 +0200 Subject: [PATCH 11/27] Add create rollup job api to high level rest client (#33521) This commit adds the Create Rollup Job API to the high level REST client. It supersedes #32703 and adds dedicated request/response objects so that it does not depend on server side components. 
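For orientation, a minimal usage sketch of the new API (the `putRollupJob` and `putRollupJobAsync` entry points appear verbatim in the `RollupClient` added below; the pre-built `config` object and the listener bodies are illustrative assumptions):

    // Assumes `client` is a RestHighLevelClient and `config` is a RollupJobConfig
    // built from the org.elasticsearch.client.rollup.job.config classes this patch adds.
    PutRollupJobRequest request = new PutRollupJobRequest(config);

    // Synchronous variant:
    PutRollupJobResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT);

    // Asynchronous variant:
    client.rollup().putRollupJobAsync(request, RequestOptions.DEFAULT,
        ActionListener.wrap(
            r -> { /* rollup job was accepted */ },
            e -> { /* rollup job creation failed */ }));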
Related #29827 --- .../client/RestHighLevelClient.java | 13 + .../elasticsearch/client/RollupClient.java | 76 +++++ .../client/RollupRequestConverters.java | 45 +++ .../client/ValidationException.java | 17 +- .../client/rollup/PutRollupJobRequest.java | 65 ++++ .../client/rollup/PutRollupJobResponse.java | 80 +++++ .../job/config/DateHistogramGroupConfig.java | 189 +++++++++++ .../client/rollup/job/config/GroupConfig.java | 171 ++++++++++ .../job/config/HistogramGroupConfig.java | 127 ++++++++ .../rollup/job/config/MetricConfig.java | 135 ++++++++ .../rollup/job/config/RollupJobConfig.java | 242 ++++++++++++++ .../rollup/job/config/TermsGroupConfig.java | 115 +++++++ .../client/RestHighLevelClientTests.java | 1 + .../org/elasticsearch/client/RollupIT.java | 162 +++++++++ .../documentation/RollupDocumentationIT.java | 163 +++++++++ .../rollup/PutRollupJobRequestTests.java | 59 ++++ .../rollup/PutRollupJobResponseTests.java | 50 +++ .../config/DateHistogramGroupConfigTests.java | 98 ++++++ .../rollup/job/config/GroupConfigTests.java | 116 +++++++ .../job/config/HistogramGroupConfigTests.java | 109 +++++++ .../rollup/job/config/MetricConfigTests.java | 127 ++++++++ .../job/config/RollupJobConfigTests.java | 308 ++++++++++++++++++ .../job/config/TermsGroupConfigTests.java | 87 +++++ .../high-level/rollup/put_job.asciidoc | 172 ++++++++++ .../high-level/supported-apis.asciidoc | 8 + .../rollup/rest/RestPutRollupJobAction.java | 14 +- 26 files changed, 2738 insertions(+), 11 deletions(-) create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobRequest.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfig.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/GroupConfig.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/HistogramGroupConfig.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/MetricConfig.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/RollupJobConfig.java create mode 100644 client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/TermsGroupConfig.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobRequestTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobResponseTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfigTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/GroupConfigTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/HistogramGroupConfigTests.java create mode 100644 
client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/MetricConfigTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/RollupJobConfigTests.java create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/TermsGroupConfigTests.java create mode 100644 docs/java-rest/high-level/rollup/put_job.asciidoc diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 687290abe88..ae1766fab02 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -219,6 +219,7 @@ public class RestHighLevelClient implements Closeable { private final MigrationClient migrationClient = new MigrationClient(this); private final MachineLearningClient machineLearningClient = new MachineLearningClient(this); private final SecurityClient securityClient = new SecurityClient(this); + private final RollupClient rollupClient = new RollupClient(this); /** * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the @@ -300,6 +301,18 @@ public class RestHighLevelClient implements Closeable { return snapshotClient; } + /** + * Provides methods for accessing the Elastic Licensed Rollup APIs that + * are shipped with the default distribution of Elasticsearch. All of + * these APIs will 404 if run against the OSS distribution of Elasticsearch. + *
<p>
+ * See the + * Rollup APIs on elastic.co for more information. + */ + public RollupClient rollup() { + return rollupClient; + } + /** * Provides a {@link TasksClient} which can be used to access the Tasks API. * diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java new file mode 100644 index 00000000000..1a766cb4923 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.rollup.PutRollupJobRequest; +import org.elasticsearch.client.rollup.PutRollupJobResponse; + +import java.io.IOException; +import java.util.Collections; + +/** + * A wrapper for the {@link RestHighLevelClient} that provides methods for + * accessing the Elastic Rollup-related APIs + *
<p>
+ * See the + * X-Pack Rollup APIs on elastic.co for more information. + */ +public class RollupClient { + + private final RestHighLevelClient restHighLevelClient; + + RollupClient(final RestHighLevelClient restHighLevelClient) { + this.restHighLevelClient = restHighLevelClient; + } + + /** + * Put a rollup job into the cluster + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public PutRollupJobResponse putRollupJob(PutRollupJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + RollupRequestConverters::putJob, + options, + PutRollupJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Asynchronously put a rollup job into the cluster + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putRollupJobAsync(PutRollupJobRequest request, RequestOptions options, ActionListener<PutRollupJobResponse> listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + RollupRequestConverters::putJob, + options, + PutRollupJobResponse::fromXContent, + listener, Collections.emptySet()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java new file mode 100644 index 00000000000..f1c4f77ae4c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.rollup.PutRollupJobRequest; + +import java.io.IOException; + +import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE; +import static org.elasticsearch.client.RequestConverters.createEntity; + +final class RollupRequestConverters { + + private RollupRequestConverters() { + } + + static Request putJob(final PutRollupJobRequest putRollupJobRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("rollup") + .addPathPartAsIs("job") + .addPathPart(putRollupJobRequest.getConfig().getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putRollupJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java index 6b5d738d675..730ea7e95de 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ValidationException.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.client; +import org.elasticsearch.common.Nullable; + import java.util.ArrayList; import java.util.List; @@ -31,10 +33,23 @@ public class ValidationException extends IllegalArgumentException { * Add a new validation error to the accumulating validation errors * @param error the error to add */ - public void addValidationError(String error) { + public void addValidationError(final String error) { validationErrors.add(error); } + /** + * Adds validation errors from an existing {@link ValidationException} to + * the accumulating validation errors + * @param exception the {@link ValidationException} to add errors from + */ + public final void addValidationErrors(final @Nullable ValidationException exception) { + if (exception != null) { + for (String error : exception.validationErrors()) { + addValidationError(error); + } + } + } + /** * Returns the validation errors accumulated */ diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobRequest.java new file mode 100644 index 00000000000..42b786d38ed --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobRequest.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.client.rollup.job.config.RollupJobConfig; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; +import java.util.Optional; + +public class PutRollupJobRequest implements Validatable, ToXContentObject { + + private final RollupJobConfig config; + + public PutRollupJobRequest(final RollupJobConfig config) { + this.config = Objects.requireNonNull(config, "rollup job configuration is required"); + } + + public RollupJobConfig getConfig() { + return config; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return config.toXContent(builder, params); + } + + @Override + public Optional<ValidationException> validate() { + return config.validate(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final PutRollupJobRequest that = (PutRollupJobRequest) o; + return Objects.equals(config, that.config); + } + + @Override + public int hashCode() { + return Objects.hash(config); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java new file mode 100644 index 00000000000..0c55bd419cb --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class PutRollupJobResponse implements ToXContentObject { + + private final boolean acknowledged; + + public PutRollupJobResponse(final boolean acknowledged) { + this.acknowledged = acknowledged; + } + + public boolean isAcknowledged() { + return acknowledged; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final PutRollupJobResponse that = (PutRollupJobResponse) o; + return isAcknowledged() == that.isAcknowledged(); + } + + @Override + public int hashCode() { + return Objects.hash(acknowledged); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("acknowledged", isAcknowledged()); + } + builder.endObject(); + return builder; + } + + private static final ConstructingObjectParser<PutRollupJobResponse, Void> PARSER + = new ConstructingObjectParser<>("put_rollup_job_response", true, args -> new PutRollupJobResponse((boolean) args[0])); + static { + PARSER.declareBoolean(constructorArg(), new ParseField("acknowledged")); + } + + public static PutRollupJobResponse fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfig.java new file mode 100644 index 00000000000..21a610f7894 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfig.java @@ -0,0 +1,189 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.rollup.job.config; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.ObjectParser.ValueType; + +/** + * The configuration object for the histograms in the rollup config + * + * { + * "groups": [ + * "date_histogram": { + * "field" : "foo", + * "interval" : "1d", + * "delay": "30d", + * "time_zone" : "EST" + * } + * ] + * } + */ +public class DateHistogramGroupConfig implements Validatable, ToXContentObject { + + static final String NAME = "date_histogram"; + private static final String INTERVAL = "interval"; + private static final String FIELD = "field"; + private static final String TIME_ZONE = "time_zone"; + private static final String DELAY = "delay"; + private static final String DEFAULT_TIMEZONE = "UTC"; + + private static final ConstructingObjectParser<DateHistogramGroupConfig, Void> PARSER; + static { + PARSER = new ConstructingObjectParser<>(NAME, true, a -> + new DateHistogramGroupConfig((String) a[0], (DateHistogramInterval) a[1], (DateHistogramInterval) a[2], (String) a[3])); + PARSER.declareString(constructorArg(), new ParseField(FIELD)); + PARSER.declareField(constructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(INTERVAL), ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), p -> new DateHistogramInterval(p.text()), new ParseField(DELAY), ValueType.STRING); + PARSER.declareString(optionalConstructorArg(), new ParseField(TIME_ZONE)); + } + + private final String field; + private final DateHistogramInterval interval; + private final DateHistogramInterval delay; + private final String timeZone; + + /** + * Create a new {@link DateHistogramGroupConfig} using the given field and interval parameters. + */ + public DateHistogramGroupConfig(final String field, final DateHistogramInterval interval) { + this(field, interval, null, null); + } + + /** + * Create a new {@link DateHistogramGroupConfig} using the given configuration parameters. + *
<p>
+ * The {@code field} and {@code interval} are required to compute the date histogram for the rolled up documents. + * The {@code delay} is optional and can be set to {@code null}. It defines how long to wait before rolling up new documents. + * The {@code timeZone} is optional and can be set to {@code null}. When configured, the time zone value is resolved using + * {@link DateTimeZone#forID(String)} and must match a time zone identifier provided by the Joda Time library. + *
<p>
+ * + * @param field the name of the date field to use for the date histogram (required) + * @param interval the interval to use for the date histogram (required) + * @param delay the time delay (optional) + * @param timeZone the id of time zone to use to calculate the date histogram (optional). When {@code null}, the UTC timezone is used. + */ + public DateHistogramGroupConfig(final String field, + final DateHistogramInterval interval, + final @Nullable DateHistogramInterval delay, + final @Nullable String timeZone) { + this.field = field; + this.interval = interval; + this.delay = delay; + this.timeZone = (timeZone != null && timeZone.isEmpty() == false) ? timeZone : DEFAULT_TIMEZONE; + } + + @Override + public Optional<ValidationException> validate() { + final ValidationException validationException = new ValidationException(); + if (field == null || field.isEmpty()) { + validationException.addValidationError("Field name is required"); + } + if (interval == null) { + validationException.addValidationError("Interval is required"); + } + if (validationException.validationErrors().isEmpty()) { + return Optional.empty(); + } + return Optional.of(validationException); + } + + /** + * Get the date field + */ + public String getField() { + return field; + } + + /** + * Get the date interval + */ + public DateHistogramInterval getInterval() { + return interval; + } + + /** + * Get the time delay for this histogram + */ + public DateHistogramInterval getDelay() { + return delay; + } + + /** + * Get the timezone to apply + */ + public String getTimeZone() { + return timeZone; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(INTERVAL, interval.toString()); + builder.field(FIELD, field); + if (delay != null) { + builder.field(DELAY, delay.toString()); + } + builder.field(TIME_ZONE, timeZone); + } + return builder.endObject(); + } + + @Override + public boolean equals(final Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + final DateHistogramGroupConfig that = (DateHistogramGroupConfig) other; + return Objects.equals(interval, that.interval) + && Objects.equals(field, that.field) + && Objects.equals(delay, that.delay) + && Objects.equals(timeZone, that.timeZone); + } + + @Override + public int hashCode() { + return Objects.hash(interval, field, delay, timeZone); + } + + public static DateHistogramGroupConfig fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/GroupConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/GroupConfig.java new file mode 100644 index 00000000000..59a0398e4d9 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/GroupConfig.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup.job.config; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * The configuration object for the groups section in the rollup config. + * Basically just a wrapper for histo/date histo/terms objects + * + * { + * "groups": [ + * "date_histogram": {...}, + * "histogram" : {...}, + * "terms" : {...} + * ] + * } + */ +public class GroupConfig implements Validatable, ToXContentObject { + + static final String NAME = "groups"; + private static final ConstructingObjectParser<GroupConfig, Void> PARSER; + static { + PARSER = new ConstructingObjectParser<>(NAME, true, args -> + new GroupConfig((DateHistogramGroupConfig) args[0], (HistogramGroupConfig) args[1], (TermsGroupConfig) args[2])); + PARSER.declareObject(constructorArg(), + (p, c) -> DateHistogramGroupConfig.fromXContent(p), new ParseField(DateHistogramGroupConfig.NAME)); + PARSER.declareObject(optionalConstructorArg(), + (p, c) -> HistogramGroupConfig.fromXContent(p), new ParseField(HistogramGroupConfig.NAME)); + PARSER.declareObject(optionalConstructorArg(), + (p, c) -> TermsGroupConfig.fromXContent(p), new ParseField(TermsGroupConfig.NAME)); + } + + private final DateHistogramGroupConfig dateHistogram; + private final @Nullable HistogramGroupConfig histogram; + private final @Nullable TermsGroupConfig terms; + + public GroupConfig(final DateHistogramGroupConfig dateHistogram) { + this(dateHistogram, null, null); + } + + public GroupConfig(final DateHistogramGroupConfig dateHistogram, + final @Nullable HistogramGroupConfig histogram, + final @Nullable TermsGroupConfig terms) { + this.dateHistogram = dateHistogram; + this.histogram = histogram; + this.terms = terms; + } + + @Override + public Optional<ValidationException> validate() { + final ValidationException validationException = new ValidationException(); + if (dateHistogram != null) { + final Optional<ValidationException> dateHistogramValidationErrors = dateHistogram.validate(); + if (dateHistogramValidationErrors != null && dateHistogramValidationErrors.isPresent()) { + validationException.addValidationErrors(dateHistogramValidationErrors.get()); + } + } else { + validationException.addValidationError("Date histogram must not be null"); + } + if (histogram != null) { + final Optional<ValidationException> histogramValidationErrors = histogram.validate(); + if (histogramValidationErrors != null && histogramValidationErrors.isPresent()) { + validationException.addValidationErrors(histogramValidationErrors.get()); + } + } + if (terms !=
null) { + final Optional<ValidationException> termsValidationErrors = terms.validate(); + if (termsValidationErrors != null && termsValidationErrors.isPresent()) { + validationException.addValidationErrors(termsValidationErrors.get()); + } + } + if (validationException.validationErrors().isEmpty()) { + return Optional.empty(); + } + return Optional.of(validationException); + } + + /** + * @return the configuration of the date histogram + */ + public DateHistogramGroupConfig getDateHistogram() { + return dateHistogram; + } + + /** + * @return the configuration of the histogram + */ + @Nullable + public HistogramGroupConfig getHistogram() { + return histogram; + } + + /** + * @return the configuration of the terms + */ + @Nullable + public TermsGroupConfig getTerms() { + return terms; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(DateHistogramGroupConfig.NAME, dateHistogram); + if (histogram != null) { + builder.field(HistogramGroupConfig.NAME, histogram); + } + if (terms != null) { + builder.field(TermsGroupConfig.NAME, terms); + } + } + return builder.endObject(); + } + + @Override + public boolean equals(final Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + final GroupConfig that = (GroupConfig) other; + return Objects.equals(dateHistogram, that.dateHistogram) + && Objects.equals(histogram, that.histogram) + && Objects.equals(terms, that.terms); + } + + @Override + public int hashCode() { + return Objects.hash(dateHistogram, histogram, terms); + } + + public static GroupConfig fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/HistogramGroupConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/HistogramGroupConfig.java new file mode 100644 index 00000000000..95f6002f6f8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/HistogramGroupConfig.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.rollup.job.config; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * The configuration object for the histograms in the rollup config + * + * { + * "groups": [ + * "histogram": { + * "fields" : [ "foo", "bar" ], + * "interval" : 123 + * } + * ] + * } + */ +public class HistogramGroupConfig implements Validatable, ToXContentObject { + + static final String NAME = "histogram"; + private static final String INTERVAL = "interval"; + private static final String FIELDS = "fields"; + + private static final ConstructingObjectParser<HistogramGroupConfig, Void> PARSER; + static { + PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + @SuppressWarnings("unchecked") List<String> fields = (List<String>) args[1]; + return new HistogramGroupConfig((long) args[0], fields != null ? fields.toArray(new String[fields.size()]) : null); + }); + PARSER.declareLong(constructorArg(), new ParseField(INTERVAL)); + PARSER.declareStringArray(constructorArg(), new ParseField(FIELDS)); + } + + private final long interval; + private final String[] fields; + + public HistogramGroupConfig(final long interval, final String... fields) { + this.interval = interval; + this.fields = fields; + } + + @Override + public Optional<ValidationException> validate() { + final ValidationException validationException = new ValidationException(); + if (fields == null || fields.length == 0) { + validationException.addValidationError("Fields must have at least one value"); + } + if (interval <= 0) { + validationException.addValidationError("Interval must be a positive long"); + } + if (validationException.validationErrors().isEmpty()) { + return Optional.empty(); + } + return Optional.of(validationException); + } + + public long getInterval() { + return interval; + } + + public String[] getFields() { + return fields; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(INTERVAL, interval); + builder.field(FIELDS, fields); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(final Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + final HistogramGroupConfig that = (HistogramGroupConfig) other; + return Objects.equals(interval, that.interval) && Arrays.equals(fields, that.fields); + } + + @Override + public int hashCode() { + return Objects.hash(interval, Arrays.hashCode(fields)); + } + + public static HistogramGroupConfig fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/MetricConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/MetricConfig.java new file mode 100644 index 00000000000..4ba7404c470 --- /dev/null +++
b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/MetricConfig.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup.job.config; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * The configuration object for the metrics portion of a rollup job config + * + * { + * "metrics": [ + * { + * "field": "foo", + * "metrics": [ "min", "max", "sum"] + * }, + * { + * "field": "bar", + * "metrics": [ "max" ] + * } + * ] + * } + */ +public class MetricConfig implements Validatable, ToXContentObject { + + static final String NAME = "metrics"; + private static final String FIELD = "field"; + private static final String METRICS = "metrics"; + + private static final ConstructingObjectParser<MetricConfig, Void> PARSER; + static { + PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + @SuppressWarnings("unchecked") List<String> metrics = (List<String>) args[1]; + return new MetricConfig((String) args[0], metrics); + }); + PARSER.declareString(constructorArg(), new ParseField(FIELD)); + PARSER.declareStringArray(constructorArg(), new ParseField(METRICS)); + } + + private final String field; + private final List<String> metrics; + + public MetricConfig(final String field, final List<String> metrics) { + this.field = field; + this.metrics = metrics; + } + + @Override + public Optional<ValidationException> validate() { + final ValidationException validationException = new ValidationException(); + if (field == null || field.isEmpty()) { + validationException.addValidationError("Field name is required"); + } + if (metrics == null || metrics.isEmpty()) { + validationException.addValidationError("Metrics must be a non-null, non-empty array of strings"); + } + if (validationException.validationErrors().isEmpty()) { + return Optional.empty(); + } + return Optional.of(validationException); + } + + /** + * @return the name of the field used in the metric configuration. Never {@code null}. + */ + public String getField() { + return field; + } + + /** + * @return the names of the metrics used in the metric configuration. Never {@code null}.
+ */ + public List<String> getMetrics() { + return metrics; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(FIELD, field); + builder.field(METRICS, metrics); + } + return builder.endObject(); + } + + @Override + public boolean equals(final Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + final MetricConfig that = (MetricConfig) other; + return Objects.equals(field, that.field) && Objects.equals(metrics, that.metrics); + } + + @Override + public int hashCode() { + return Objects.hash(field, metrics); + } + + public static MetricConfig fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/RollupJobConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/RollupJobConfig.java new file mode 100644 index 00000000000..d8e87eeb3d5 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/RollupJobConfig.java @@ -0,0 +1,242 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup.job.config; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * This class holds the configuration details of a rollup job, such as the groupings, metrics, which + * index to roll up and where to roll the documents to.
+ */ +public class RollupJobConfig implements Validatable, ToXContentObject { + + private static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueSeconds(20); + private static final String ID = "id"; + private static final String TIMEOUT = "timeout"; + private static final String CRON = "cron"; + private static final String PAGE_SIZE = "page_size"; + private static final String INDEX_PATTERN = "index_pattern"; + private static final String ROLLUP_INDEX = "rollup_index"; + + private final String id; + private final String indexPattern; + private final String rollupIndex; + private final GroupConfig groupConfig; + private final List<MetricConfig> metricsConfig; + private final TimeValue timeout; + private final String cron; + private final int pageSize; + + private static final ConstructingObjectParser<RollupJobConfig, String> PARSER; + static { + PARSER = new ConstructingObjectParser<>("rollup_job_config", true, (args, optionalId) -> { + String id = args[0] != null ? (String) args[0] : optionalId; + String indexPattern = (String) args[1]; + String rollupIndex = (String) args[2]; + GroupConfig groupConfig = (GroupConfig) args[3]; + @SuppressWarnings("unchecked") + List<MetricConfig> metricsConfig = (List<MetricConfig>) args[4]; + TimeValue timeout = (TimeValue) args[5]; + String cron = (String) args[6]; + int pageSize = (int) args[7]; + return new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groupConfig, metricsConfig, timeout); + }); + PARSER.declareString(optionalConstructorArg(), new ParseField(ID)); + PARSER.declareString(constructorArg(), new ParseField(INDEX_PATTERN)); + PARSER.declareString(constructorArg(), new ParseField(ROLLUP_INDEX)); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> GroupConfig.fromXContent(p), new ParseField(GroupConfig.NAME)); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> MetricConfig.fromXContent(p), new ParseField(MetricConfig.NAME)); + PARSER.declareField(optionalConstructorArg(), (p, c) -> TimeValue.parseTimeValue(p.textOrNull(), TIMEOUT), + new ParseField(TIMEOUT), ObjectParser.ValueType.STRING_OR_NULL); + PARSER.declareString(constructorArg(), new ParseField(CRON)); + PARSER.declareInt(constructorArg(), new ParseField(PAGE_SIZE)); + } + + public RollupJobConfig(final String id, + final String indexPattern, + final String rollupIndex, + final String cron, + final int pageSize, + final GroupConfig groupConfig, + final List<MetricConfig> metricsConfig, + final @Nullable TimeValue timeout) { + this.id = id; + this.indexPattern = indexPattern; + this.rollupIndex = rollupIndex; + this.groupConfig = groupConfig; + this.metricsConfig = metricsConfig != null ? metricsConfig : Collections.emptyList(); + this.timeout = timeout != null ?
timeout : DEFAULT_TIMEOUT; + this.cron = cron; + this.pageSize = pageSize; + } + + @Override + public Optional<ValidationException> validate() { + final ValidationException validationException = new ValidationException(); + if (id == null || id.isEmpty()) { + validationException.addValidationError("Id must be a non-null, non-empty string"); + } + if (indexPattern == null || indexPattern.isEmpty()) { + validationException.addValidationError("Index pattern must be a non-null, non-empty string"); + } else if (Regex.isMatchAllPattern(indexPattern)) { + validationException.addValidationError("Index pattern must not match all indices (as it would match its own rollup index)"); + } else if (indexPattern != null && indexPattern.equals(rollupIndex)) { + validationException.addValidationError("Rollup index may not be the same as the index pattern"); + } else if (Regex.isSimpleMatchPattern(indexPattern) && Regex.simpleMatch(indexPattern, rollupIndex)) { + validationException.addValidationError("Index pattern would match rollup index name which is not allowed"); + } + + if (rollupIndex == null || rollupIndex.isEmpty()) { + validationException.addValidationError("Rollup index must be a non-null, non-empty string"); + } + if (cron == null || cron.isEmpty()) { + validationException.addValidationError("Cron schedule must be a non-null, non-empty string"); + } + if (pageSize <= 0) { + validationException.addValidationError("Page size is mandatory and must be a positive long"); + } + if (groupConfig == null && (metricsConfig == null || metricsConfig.isEmpty())) { + validationException.addValidationError("At least one grouping or metric must be configured"); + } + if (groupConfig != null) { + final Optional<ValidationException> groupValidationErrors = groupConfig.validate(); + if (groupValidationErrors != null && groupValidationErrors.isPresent()) { + validationException.addValidationErrors(groupValidationErrors.get()); + } + } + if (metricsConfig != null) { + for (MetricConfig metricConfig : metricsConfig) { + final Optional<ValidationException> metricsValidationErrors = metricConfig.validate(); + if (metricsValidationErrors != null && metricsValidationErrors.isPresent()) { + validationException.addValidationErrors(metricsValidationErrors.get()); + } + } + } + if (validationException.validationErrors().isEmpty()) { + return Optional.empty(); + } + return Optional.of(validationException); + } + + public String getId() { + return id; + } + + public GroupConfig getGroupConfig() { + return groupConfig; + } + + public List<MetricConfig> getMetricsConfig() { + return metricsConfig; + } + + public TimeValue getTimeout() { + return timeout; + } + + public String getIndexPattern() { + return indexPattern; + } + + public String getRollupIndex() { + return rollupIndex; + } + + public String getCron() { + return cron; + } + + public int getPageSize() { + return pageSize; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(ID, id); + builder.field(INDEX_PATTERN, indexPattern); + builder.field(ROLLUP_INDEX, rollupIndex); + builder.field(CRON, cron); + if (groupConfig != null) { + builder.field(GroupConfig.NAME, groupConfig); + } + if (metricsConfig != null) { + builder.startArray(MetricConfig.NAME); + for (MetricConfig metric : metricsConfig) { + metric.toXContent(builder, params); + } + builder.endArray(); + } + if (timeout != null) { + builder.field(TIMEOUT, timeout.getStringRep()); + } + builder.field(PAGE_SIZE, pageSize); + } + builder.endObject(); + return builder; + } + +
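+    // Example of the request body produced by the toXContent method above (values illustrative):
+    //   { "id": "job_1", "index_pattern": "docs", "rollup_index": "rollup",
+    //     "cron": "*/1 * * * * ?", "groups": { ... }, "metrics": [ ... ],
+    //     "timeout": "20s", "page_size": 100 }
+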
@Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + final RollupJobConfig that = (RollupJobConfig) other; + return Objects.equals(this.id, that.id) + && Objects.equals(this.indexPattern, that.indexPattern) + && Objects.equals(this.rollupIndex, that.rollupIndex) + && Objects.equals(this.cron, that.cron) + && Objects.equals(this.groupConfig, that.groupConfig) + && Objects.equals(this.metricsConfig, that.metricsConfig) + && Objects.equals(this.timeout, that.timeout) + && Objects.equals(this.pageSize, that.pageSize); + } + + @Override + public int hashCode() { + return Objects.hash(id, indexPattern, rollupIndex, cron, groupConfig, metricsConfig, timeout, pageSize); + } + + public static RollupJobConfig fromXContent(final XContentParser parser, @Nullable final String optionalJobId) throws IOException { + return PARSER.parse(parser, optionalJobId); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/TermsGroupConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/TermsGroupConfig.java new file mode 100644 index 00000000000..5df2bba5936 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/job/config/TermsGroupConfig.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.rollup.job.config; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * The configuration object for the terms groupings in the rollup config + * + * { + * "groups": [ + * "terms": { + * "fields" : [ "foo", "bar" ] + * } + * ] + * } + */ +public class TermsGroupConfig implements Validatable, ToXContentObject { + + static final String NAME = "terms"; + private static final String FIELDS = "fields"; + + private static final ConstructingObjectParser<TermsGroupConfig, Void> PARSER; + static { + PARSER = new ConstructingObjectParser<>(NAME, true, args -> { + @SuppressWarnings("unchecked") List<String> fields = (List<String>) args[0]; + return new TermsGroupConfig(fields != null ?
fields.toArray(new String[fields.size()]) : null); + }); + PARSER.declareStringArray(constructorArg(), new ParseField(FIELDS)); + } + + private final String[] fields; + + public TermsGroupConfig(final String... fields) { + this.fields = fields; + } + + @Override + public Optional<ValidationException> validate() { + final ValidationException validationException = new ValidationException(); + if (fields == null || fields.length == 0) { + validationException.addValidationError("Fields must have at least one value"); + } + if (validationException.validationErrors().isEmpty()) { + return Optional.empty(); + } + return Optional.of(validationException); + } + + /** + * @return the names of the fields. Never {@code null}. + */ + public String[] getFields() { + return fields; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(FIELDS, fields); + } + return builder.endObject(); + } + + @Override + public boolean equals(final Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + final TermsGroupConfig that = (TermsGroupConfig) other; + return Arrays.equals(fields, that.fields); + } + + @Override + public int hashCode() { + return Arrays.hashCode(fields); + } + + public static TermsGroupConfig fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 3bd47306e5e..ca6043768df 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -754,6 +754,7 @@ public class RestHighLevelClientTests extends ESTestCase { if (apiName.startsWith("xpack.") == false && apiName.startsWith("license.") == false && apiName.startsWith("machine_learning.") == false && + apiName.startsWith("rollup.") == false && apiName.startsWith("watcher.") == false && apiName.startsWith("graph.") == false && apiName.startsWith("migration.") == false && diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java new file mode 100644 index 00000000000..5d88b3f2e29 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client; + +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.rollup.PutRollupJobRequest; +import org.elasticsearch.client.rollup.PutRollupJobResponse; +import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig; +import org.elasticsearch.client.rollup.job.config.GroupConfig; +import org.elasticsearch.client.rollup.job.config.MetricConfig; +import org.elasticsearch.client.rollup.job.config.RollupJobConfig; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public class RollupIT extends ESRestHighLevelClientTestCase { + + private static final List<String> SUPPORTED_METRICS = Arrays.asList(MaxAggregationBuilder.NAME, MinAggregationBuilder.NAME, + SumAggregationBuilder.NAME, AvgAggregationBuilder.NAME, ValueCountAggregationBuilder.NAME); + + @SuppressWarnings("unchecked") + public void testPutRollupJob() throws Exception { + double sum = 0.0d; + int max = Integer.MIN_VALUE; + int min = Integer.MAX_VALUE; + + final BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int minute = 0; minute < 60; minute++) { + for (int second = 0; second < 60; second = second + 10) { + final int value = randomIntBetween(0, 100); + + final IndexRequest indexRequest = new IndexRequest("docs", "doc"); + indexRequest.source(jsonBuilder() + .startObject() + .field("value", value) + .field("date", String.format(Locale.ROOT, "2018-01-01T00:%02d:%02dZ", minute, second)) + .endObject()); + bulkRequest.add(indexRequest); + + sum += value; + if (value > max) { + max = value; + } + if (value < min) { + min = value; + } + } + } + + final int numDocs = bulkRequest.numberOfActions(); + + BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + assertEquals(RestStatus.OK, bulkResponse.status()); + if (bulkResponse.hasFailures()) { + for (BulkItemResponse itemResponse : bulkResponse.getItems()) { + if (itemResponse.isFailed()) { + logger.fatal(itemResponse.getFailureMessage()); + } + } + } + assertFalse(bulkResponse.hasFailures()); + + RefreshResponse refreshResponse = highLevelClient().indices().refresh(new RefreshRequest("docs"), RequestOptions.DEFAULT); + assertEquals(0, refreshResponse.getFailedShards()); + + final String id =
randomAlphaOfLength(10); + final String indexPattern = randomFrom("docs", "d*", "doc*"); + final String rollupIndex = randomFrom("rollup", "test"); + final String cron = "*/1 * * * * ?"; + final int pageSize = randomIntBetween(numDocs, numDocs * 10); + // TODO expand this to also test with histogram and terms? + final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); + final List<MetricConfig> metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); + final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600)); + + PutRollupJobRequest putRollupJobRequest = + new PutRollupJobRequest(new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout)); + + final RollupClient rollupClient = highLevelClient().rollup(); + PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); + assertTrue(response.isAcknowledged()); + + // TODO Replace this with the Rollup Start Job API + Response startResponse = client().performRequest(new Request("POST", "/_xpack/rollup/job/" + id + "/_start")); + assertEquals(RestStatus.OK.getStatus(), startResponse.getHttpResponse().getStatusLine().getStatusCode()); + + int finalMin = min; + int finalMax = max; + double finalSum = sum; + assertBusy(() -> { + SearchResponse searchResponse = highLevelClient().search(new SearchRequest(rollupIndex), RequestOptions.DEFAULT); + assertEquals(0, searchResponse.getFailedShards()); + assertEquals(1L, searchResponse.getHits().getTotalHits()); + + SearchHit searchHit = searchResponse.getHits().getAt(0); + Map<String, Object> source = searchHit.getSourceAsMap(); + assertNotNull(source); + + assertEquals(numDocs, source.get("date.date_histogram._count")); + assertEquals(groups.getDateHistogram().getInterval().toString(), source.get("date.date_histogram.interval")); + assertEquals(groups.getDateHistogram().getTimeZone(), source.get("date.date_histogram.time_zone")); + + for (MetricConfig metric : metrics) { + for (String name : metric.getMetrics()) { + Number value = (Number) source.get(metric.getField() + "." + name + ".value"); + if ("min".equals(name)) { + assertEquals(finalMin, value.intValue()); + } else if ("max".equals(name)) { + assertEquals(finalMax, value.intValue()); + } else if ("sum".equals(name)) { + assertEquals(finalSum, value.doubleValue(), 0.0d); + } else if ("avg".equals(name)) { + assertEquals(finalSum, value.doubleValue(), 0.0d); + Number avgCount = (Number) source.get(metric.getField() + "." + name + "._count"); + assertEquals(numDocs, avgCount.intValue()); + } else if ("value_count".equals(name)) { + assertEquals(numDocs, value.intValue()); + } + } + } + }); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java new file mode 100644 index 00000000000..aadb0f0f200 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java @@ -0,0 +1,163 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.documentation; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.client.rollup.PutRollupJobRequest; +import org.elasticsearch.client.rollup.PutRollupJobResponse; +import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig; +import org.elasticsearch.client.rollup.job.config.GroupConfig; +import org.elasticsearch.client.rollup.job.config.HistogramGroupConfig; +import org.elasticsearch.client.rollup.job.config.MetricConfig; +import org.elasticsearch.client.rollup.job.config.RollupJobConfig; +import org.elasticsearch.client.rollup.job.config.TermsGroupConfig; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; + +public class RollupDocumentationIT extends ESRestHighLevelClientTestCase { + + @Before + public void setUpDocs() throws IOException { + final BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < 50; i++) { + final IndexRequest indexRequest = new IndexRequest("docs", "doc"); + indexRequest.source(jsonBuilder() + .startObject() + .field("timestamp", String.format(Locale.ROOT, "2018-01-01T00:%02d:00Z", i)) + .field("hostname", 0) + .field("datacenter", 0) + .field("temperature", 0) + .field("voltage", 0) + .field("load", 0) + .field("net_in", 0) + .field("net_out", 0) + .endObject()); + bulkRequest.add(indexRequest); + } + BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); + assertEquals(RestStatus.OK, bulkResponse.status()); + assertFalse(bulkResponse.hasFailures()); + + RefreshResponse refreshResponse = highLevelClient().indices().refresh(new RefreshRequest("docs"), RequestOptions.DEFAULT); + assertEquals(0, refreshResponse.getFailedShards()); + } + + public void testCreateRollupJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + final String indexPattern = "docs"; + final String rollupIndex = "rollup"; + final String cron = "*/1 * * * * ?"; + final int pageSize = 100; + final TimeValue timeout = null; + + 
//tag::x-pack-rollup-put-rollup-job-group-config
+        DateHistogramGroupConfig dateHistogram =
+            new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1>
+        TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter"); // <2>
+        HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out"); // <3>
+
+        GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms); // <4>
+        //end::x-pack-rollup-put-rollup-job-group-config
+
+        //tag::x-pack-rollup-put-rollup-job-metrics-config
+        List<MetricConfig> metrics = new ArrayList<>(); // <1>
+        metrics.add(new MetricConfig("temperature", Arrays.asList("min", "max", "sum"))); // <2>
+        metrics.add(new MetricConfig("voltage", Arrays.asList("avg", "value_count"))); // <3>
+        //end::x-pack-rollup-put-rollup-job-metrics-config
+        {
+            String id = "job_1";
+
+            //tag::x-pack-rollup-put-rollup-job-config
+            RollupJobConfig config = new RollupJobConfig(id, // <1>
+                indexPattern, // <2>
+                rollupIndex, // <3>
+                cron, // <4>
+                pageSize, // <5>
+                groups, // <6>
+                metrics, // <7>
+                timeout); // <8>
+            //end::x-pack-rollup-put-rollup-job-config
+
+            //tag::x-pack-rollup-put-rollup-job-request
+            PutRollupJobRequest request = new PutRollupJobRequest(config); // <1>
+            //end::x-pack-rollup-put-rollup-job-request
+
+            //tag::x-pack-rollup-put-rollup-job-execute
+            PutRollupJobResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT);
+            //end::x-pack-rollup-put-rollup-job-execute
+
+            //tag::x-pack-rollup-put-rollup-job-response
+            boolean acknowledged = response.isAcknowledged(); // <1>
+            //end::x-pack-rollup-put-rollup-job-response
+            assertTrue(acknowledged);
+        }
+        {
+            String id = "job_2";
+            RollupJobConfig config = new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout);
+            PutRollupJobRequest request = new PutRollupJobRequest(config);
+            // tag::x-pack-rollup-put-rollup-job-execute-listener
+            ActionListener<PutRollupJobResponse> listener = new ActionListener<PutRollupJobResponse>() {
+                @Override
+                public void onResponse(PutRollupJobResponse response) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            // end::x-pack-rollup-put-rollup-job-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::x-pack-rollup-put-rollup-job-execute-async
+            client.rollup().putRollupJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::x-pack-rollup-put-rollup-job-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobRequestTests.java
new file mode 100644
index 00000000000..a49f85a1fed
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobRequestTests.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup;
+
+import org.elasticsearch.client.rollup.job.config.RollupJobConfig;
+import org.elasticsearch.client.rollup.job.config.RollupJobConfigTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.junit.Before;
+
+import java.io.IOException;
+
+
+public class PutRollupJobRequestTests extends AbstractXContentTestCase<PutRollupJobRequest> {
+
+    private String jobId;
+
+    @Before
+    public void setUpOptionalId() {
+        jobId = randomAlphaOfLengthBetween(1, 10);
+    }
+
+    @Override
+    protected PutRollupJobRequest createTestInstance() {
+        return new PutRollupJobRequest(RollupJobConfigTests.randomRollupJobConfig(jobId));
+    }
+
+    @Override
+    protected PutRollupJobRequest doParseInstance(final XContentParser parser) throws IOException {
+        final String optionalId = randomBoolean() ? jobId : null;
+        return new PutRollupJobRequest(RollupJobConfig.fromXContent(parser, optionalId));
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public void testRequireConfiguration() {
+        final NullPointerException e = expectThrows(NullPointerException.class, () -> new PutRollupJobRequest(null));
+        assertEquals("rollup job configuration is required", e.getMessage());
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobResponseTests.java
new file mode 100644
index 00000000000..ab8ef93e0c7
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/PutRollupJobResponseTests.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.junit.Before;
+
+import java.io.IOException;
+
+public class PutRollupJobResponseTests extends AbstractXContentTestCase<PutRollupJobResponse> {
+
+    private boolean acknowledged;
+
+    @Before
+    public void setUpAcknowledged() {
+        acknowledged = randomBoolean();
+    }
+
+    @Override
+    protected PutRollupJobResponse createTestInstance() {
+        return new PutRollupJobResponse(acknowledged);
+    }
+
+    @Override
+    protected PutRollupJobResponse doParseInstance(XContentParser parser) throws IOException {
+        return PutRollupJobResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfigTests.java
new file mode 100644
index 00000000000..2e6bb3f9154
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/DateHistogramGroupConfigTests.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup.job.config;
+
+import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.Optional;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class DateHistogramGroupConfigTests extends AbstractXContentTestCase<DateHistogramGroupConfig> {
+
+    @Override
+    protected DateHistogramGroupConfig createTestInstance() {
+        return randomDateHistogramGroupConfig();
+    }
+
+    @Override
+    protected DateHistogramGroupConfig doParseInstance(final XContentParser parser) throws IOException {
+        return DateHistogramGroupConfig.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public void testValidateNullField() {
+        final DateHistogramGroupConfig config = new DateHistogramGroupConfig(null, DateHistogramInterval.DAY, null, null);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Field name is required")));
+    }
+
+    public void testValidateEmptyField() {
+        final DateHistogramGroupConfig config = new DateHistogramGroupConfig("", DateHistogramInterval.DAY, null, null);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Field name is required")));
+    }
+
+    public void testValidateNullInterval() {
+        final DateHistogramGroupConfig config = new DateHistogramGroupConfig("field", null, null, null);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Interval is required")));
+    }
+
+    public void testValidate() {
+        final DateHistogramGroupConfig config = randomDateHistogramGroupConfig();
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(false));
+    }
+
+    static DateHistogramGroupConfig randomDateHistogramGroupConfig() {
+        final String field = randomAlphaOfLength(randomIntBetween(3, 10));
+        final DateHistogramInterval interval = new DateHistogramInterval(randomPositiveTimeValue());
+        final DateHistogramInterval delay = randomBoolean() ? new DateHistogramInterval(randomPositiveTimeValue()) : null;
+        final String timezone = randomBoolean() ? randomDateTimeZone().toString() : null;
+        return new DateHistogramGroupConfig(field, interval, delay, timezone);
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/GroupConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/GroupConfigTests.java
new file mode 100644
index 00000000000..b0214907a64
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/GroupConfigTests.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup.job.config;
+
+import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.Optional;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class GroupConfigTests extends AbstractXContentTestCase<GroupConfig> {
+
+    @Override
+    protected GroupConfig createTestInstance() {
+        return randomGroupConfig();
+    }
+
+    @Override
+    protected GroupConfig doParseInstance(final XContentParser parser) throws IOException {
+        return GroupConfig.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public void testValidateNullDateHistogramGroupConfig() {
+        final GroupConfig config = new GroupConfig(null);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Date histogram must not be null")));
+    }
+
+    public void testValidateDateHistogramGroupConfigWithErrors() {
+        final DateHistogramGroupConfig dateHistogramGroupConfig = new DateHistogramGroupConfig(null, null, null, null);
+
+        final GroupConfig config = new GroupConfig(dateHistogramGroupConfig);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(2));
+        assertThat(validationException.validationErrors(),
+            containsInAnyOrder("Field name is required", "Interval is required"));
+    }
+
+    public void testValidateHistogramGroupConfigWithErrors() {
+        final HistogramGroupConfig histogramGroupConfig = new HistogramGroupConfig(0L);
+
+        final GroupConfig config = new GroupConfig(randomGroupConfig().getDateHistogram(), histogramGroupConfig, null);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(2));
+        assertThat(validationException.validationErrors(),
+            containsInAnyOrder("Fields must have at least one value", "Interval must be a positive long"));
+    }
+
+    public void testValidateTermsGroupConfigWithErrors() {
+        final TermsGroupConfig termsGroupConfig = new TermsGroupConfig();
+
+        final GroupConfig config = new GroupConfig(randomGroupConfig().getDateHistogram(), null, termsGroupConfig);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Fields must have at least one value"));
+    }
+
+    public void testValidate() {
+        final GroupConfig config = randomGroupConfig();
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(false));
+    }
+
+    static GroupConfig randomGroupConfig() {
+        DateHistogramGroupConfig dateHistogram = DateHistogramGroupConfigTests.randomDateHistogramGroupConfig();
+        HistogramGroupConfig histogram = randomBoolean() ? HistogramGroupConfigTests.randomHistogramGroupConfig() : null;
+        TermsGroupConfig terms = randomBoolean() ? TermsGroupConfigTests.randomTermsGroupConfig() : null;
+        return new GroupConfig(dateHistogram, histogram, terms);
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/HistogramGroupConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/HistogramGroupConfigTests.java
new file mode 100644
index 00000000000..0d357bc0bb1
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/HistogramGroupConfigTests.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup.job.config;
+
+import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.Optional;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class HistogramGroupConfigTests extends AbstractXContentTestCase<HistogramGroupConfig> {
+
+    @Override
+    protected HistogramGroupConfig createTestInstance() {
+        return randomHistogramGroupConfig();
+    }
+
+    @Override
+    protected HistogramGroupConfig doParseInstance(final XContentParser parser) throws IOException {
+        return HistogramGroupConfig.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public void testValidateNullFields() {
+        final HistogramGroupConfig config = new HistogramGroupConfig(60L);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Fields must have at least one value")));
+    }
+
+    public void testValidateEmptyFields() {
+        final HistogramGroupConfig config = new HistogramGroupConfig(60L, Strings.EMPTY_ARRAY);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Fields must have at least one value")));
+    }
+
+    public void testValidateNegativeInterval() {
+        final HistogramGroupConfig config = new HistogramGroupConfig(-1L, randomHistogramGroupConfig().getFields());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Interval must be a positive long")));
+    }
+
+    public void testValidateZeroInterval() {
+        final HistogramGroupConfig config = new HistogramGroupConfig(0L, randomHistogramGroupConfig().getFields());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Interval must be a positive long")));
+    }
+
+    public void testValidate() {
+        final HistogramGroupConfig config = randomHistogramGroupConfig();
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(false));
+    }
+
+    static HistogramGroupConfig randomHistogramGroupConfig() {
+        final long interval = randomNonNegativeLong();
+        final String[] fields = new String[randomIntBetween(1, 10)];
+        for (int i = 0; i < fields.length; i++) {
+            fields[i] = randomAlphaOfLength(randomIntBetween(3, 10));
+        }
+        return new HistogramGroupConfig(interval, fields);
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/MetricConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/MetricConfigTests.java
new file mode 100644
index 00000000000..ac5f63ca3e0
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/MetricConfigTests.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup.job.config;
+
+import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class MetricConfigTests extends AbstractXContentTestCase<MetricConfig> {
+
+    @Override
+    protected MetricConfig createTestInstance() {
+        return randomMetricConfig();
+    }
+
+    @Override
+    protected MetricConfig doParseInstance(final XContentParser parser) throws IOException {
+        return MetricConfig.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public void testValidateNullField() {
+        final MetricConfig config = new MetricConfig(null, randomMetricConfig().getMetrics());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Field name is required")));
+    }
+
+    public void testValidateEmptyField() {
+        final MetricConfig config = new MetricConfig("", randomMetricConfig().getMetrics());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Field name is required")));
+    }
+
+    public void testValidateNullListOfMetrics() {
+        final MetricConfig config = new MetricConfig("field", null);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Metrics must be a non-null, non-empty array of strings")));
+    }
+
+    public void testValidateEmptyListOfMetrics() {
+        final MetricConfig config = new MetricConfig("field", Collections.emptyList());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Metrics must be a non-null, non-empty array of strings")));
+    }
+
+    public void testValidate() {
+        final MetricConfig config = randomMetricConfig();
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(false));
+    }
+
+    static MetricConfig randomMetricConfig() {
+        final List<String> metrics = new ArrayList<>();
+        if (randomBoolean()) {
+            metrics.add("min");
+        }
+        if (randomBoolean()) {
+            metrics.add("max");
+        }
+        if (randomBoolean()) {
+            metrics.add("sum");
+        }
+        if (randomBoolean()) {
+            metrics.add("avg");
+        }
+        if (randomBoolean()) {
+            metrics.add("value_count");
+        }
+        if (metrics.size() == 0) {
+            metrics.add("min");
+        }
+        // large name so we don't accidentally collide
+        return new MetricConfig(randomAlphaOfLengthBetween(15, 25), Collections.unmodifiableList(metrics));
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/RollupJobConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/RollupJobConfigTests.java
new file mode 100644
index 00000000000..c691c21e1f0
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/RollupJobConfigTests.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup.job.config;
+
+import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+
+import static java.util.Collections.singletonList;
+import static java.util.Collections.unmodifiableList;
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class RollupJobConfigTests extends AbstractXContentTestCase<RollupJobConfig> {
+
+    private String id;
+
+    @Before
+    public void setUpOptionalId() {
+        id = randomAlphaOfLengthBetween(1, 10);
+    }
+
+    @Override
+    protected RollupJobConfig createTestInstance() {
+        return randomRollupJobConfig(id);
+    }
+
+    @Override
+    protected RollupJobConfig doParseInstance(final XContentParser parser) throws IOException {
+        return RollupJobConfig.fromXContent(parser, randomBoolean() ? id : null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public void testValidateNullId() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(null, sample.getIndexPattern(), sample.getRollupIndex(), sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Id must be a non-null, non-empty string"));
+    }
+
+    public void testValidateEmptyId() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig("", sample.getIndexPattern(), sample.getRollupIndex(), sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Id must be a non-null, non-empty string"));
+    }
+
+    public void testValidateNullIndexPattern() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), null, sample.getRollupIndex(), sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Index pattern must be a non-null, non-empty string"));
+    }
+
+    public void testValidateEmptyIndexPattern() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), "", sample.getRollupIndex(), sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Index pattern must be a non-null, non-empty string"));
+    }
+
+    public void testValidateMatchAllIndexPattern() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), "*", sample.getRollupIndex(), sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(),
+            contains("Index pattern must not match all indices (as it would match it's own rollup index"));
+    }
+
+    public void testValidateIndexPatternMatchesRollupIndex() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), "rollup*", "rollup", sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Index pattern would match rollup index name which is not allowed"));
+    }
+
+    public void testValidateSameIndexAndRollupPatterns() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), "test", "test", sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Rollup index may not be the same as the index pattern"));
+    }
+
+    public void testValidateNullRollupPattern() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), sample.getIndexPattern(), null, sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Rollup index must be a non-null, non-empty string"));
+    }
+
+    public void testValidateEmptyRollupPattern() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), sample.getIndexPattern(), "", sample.getCron(),
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Rollup index must be a non-null, non-empty string"));
+    }
+
+    public void testValidateNullCron() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), sample.getIndexPattern(), sample.getRollupIndex(), null,
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Cron schedule must be a non-null, non-empty string"));
+    }
+
+    public void testValidateEmptyCron() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), sample.getIndexPattern(), sample.getRollupIndex(), "",
+            sample.getPageSize(), sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Cron schedule must be a non-null, non-empty string"));
+    }
+
+    public void testValidatePageSize() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), sample.getIndexPattern(), sample.getRollupIndex(),
+            sample.getCron(), 0, sample.getGroupConfig(), sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Page size is mandatory and must be a positive long"));
+    }
+
+    public void testValidateGroupOrMetrics() {
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), sample.getIndexPattern(), sample.getRollupIndex(),
+            sample.getCron(), sample.getPageSize(), null, null, sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("At least one grouping or metric must be configured"));
+    }
+
+    public void testValidateGroupConfigWithErrors() {
+        final GroupConfig groupConfig = new GroupConfig(null);
+
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), sample.getIndexPattern(), sample.getRollupIndex(),
+            sample.getCron(), sample.getPageSize(), groupConfig, sample.getMetricsConfig(), sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains("Date histogram must not be null"));
+    }
+
+    public void testValidateListOfMetricsWithErrors() {
+        final List<MetricConfig> metricsConfigs = singletonList(new MetricConfig(null, null));
+
+        final RollupJobConfig sample = randomRollupJobConfig(id);
+        final RollupJobConfig config = new RollupJobConfig(sample.getId(), sample.getIndexPattern(), sample.getRollupIndex(),
+            sample.getCron(), sample.getPageSize(), sample.getGroupConfig(), metricsConfigs, sample.getTimeout());
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(2));
+        assertThat(validationException.validationErrors(),
+            containsInAnyOrder("Field name is required", "Metrics must be a non-null, non-empty array of strings"));
+    }
+
+    public static RollupJobConfig randomRollupJobConfig(final String id) {
+        final String indexPattern = randomAlphaOfLengthBetween(5, 20);
+        final String rollupIndex = "rollup_" + indexPattern;
+        final String cron = randomCron();
+        final int pageSize = randomIntBetween(1, 100);
+        final TimeValue timeout = randomBoolean() ? null :
+            new TimeValue(randomIntBetween(0, 60), randomFrom(Arrays.asList(TimeUnit.MILLISECONDS, TimeUnit.SECONDS, TimeUnit.MINUTES)));
+        final GroupConfig groups = GroupConfigTests.randomGroupConfig();
+
+        final List<MetricConfig> metrics = new ArrayList<>();
+        if (randomBoolean()) {
+            final int numMetrics = randomIntBetween(1, 10);
+            for (int i = 0; i < numMetrics; i++) {
+                metrics.add(MetricConfigTests.randomMetricConfig());
+            }
+        }
+        return new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, unmodifiableList(metrics), timeout);
+    }
+
+    private static String randomCron() {
+        return (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(0, 59)))           //second
+            + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(0, 59)))      //minute
+            + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(0, 23)))      //hour
+            + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(1, 31)))      //day of month
+            + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(1, 12)))      //month
+            + " ?"                                                                                               //day of week
+            + " " + (ESTestCase.randomBoolean() ? "*" : String.valueOf(ESTestCase.randomIntBetween(1970, 2199))); //year
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/TermsGroupConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/TermsGroupConfigTests.java
new file mode 100644
index 00000000000..3c761030cf5
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/job/config/TermsGroupConfigTests.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup.job.config;
+
+import org.elasticsearch.client.ValidationException;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.Optional;
+
+import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+public class TermsGroupConfigTests extends AbstractXContentTestCase<TermsGroupConfig> {
+
+    @Override
+    protected TermsGroupConfig createTestInstance() {
+        return randomTermsGroupConfig();
+    }
+
+    @Override
+    protected TermsGroupConfig doParseInstance(final XContentParser parser) throws IOException {
+        return TermsGroupConfig.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public void testValidateNullFields() {
+        final TermsGroupConfig config = new TermsGroupConfig();
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Fields must have at least one value")));
+    }
+
+    public void testValidateEmptyFields() {
+        final TermsGroupConfig config = new TermsGroupConfig(Strings.EMPTY_ARRAY);
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(true));
+        ValidationException validationException = validation.get();
+        assertThat(validationException.validationErrors().size(), is(1));
+        assertThat(validationException.validationErrors(), contains(is("Fields must have at least one value")));
+    }
+
+    public void testValidate() {
+        final TermsGroupConfig config = randomTermsGroupConfig();
+
+        Optional<ValidationException> validation = config.validate();
+        assertThat(validation, notNullValue());
+        assertThat(validation.isPresent(), is(false));
+    }
+
+    static TermsGroupConfig randomTermsGroupConfig() {
+        final String[] fields = new String[randomIntBetween(1, 10)];
+        for (int i = 0; i < fields.length; i++) {
+            fields[i] = randomAlphaOfLength(randomIntBetween(3, 10));
+        }
+        return new TermsGroupConfig(fields);
+    }
+}
diff --git a/docs/java-rest/high-level/rollup/put_job.asciidoc b/docs/java-rest/high-level/rollup/put_job.asciidoc
new file mode 100644
index 00000000000..6d8f0352ef2
--- /dev/null
+++ b/docs/java-rest/high-level/rollup/put_job.asciidoc
@@ -0,0 +1,172 @@
+[[java-rest-high-x-pack-rollup-put-job]]
+=== Put Rollup Job API
+
+The Put Rollup Job API can be used to create a new Rollup job
+in the cluster. The API accepts a `PutRollupJobRequest` object
+as a request and returns a `PutRollupJobResponse`.
+
+[[java-rest-high-x-pack-rollup-put-rollup-job-request]]
+==== Put Rollup Job Request
+
+A `PutRollupJobRequest` requires the following argument:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup-job-request]
+--------------------------------------------------
+<1> The configuration of the Rollup job to create as a `RollupJobConfig`
+
+[[java-rest-high-x-pack-rollup-put-rollup-job-config]]
+==== Rollup Job Configuration
+
+The `RollupJobConfig` object contains all the details about the rollup job
+configuration. See <> to learn more
+about the various configuration settings.
+
+A `RollupJobConfig` requires the following arguments:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup-job-config]
+--------------------------------------------------
+<1> The name of the Rollup job
+<2> The index (or index pattern) to roll up
+<3> The index to store rollup results into
+<4> A cron expression which defines when the Rollup job should be executed
+<5> The page size to use for the Rollup job
+<6> The grouping configuration of the Rollup job as a `GroupConfig`
+<7> The metrics configuration of the Rollup job as a list of `MetricConfig`
+<8> The timeout value to use for the Rollup job as a `TimeValue`
+
+
+[[java-rest-high-x-pack-rollup-put-rollup-job-group-config]]
+==== Grouping Configuration
+
+The grouping configuration of the Rollup job is defined in the `RollupJobConfig`
+using a `GroupConfig` instance. `GroupConfig` reflects all the configuration
+settings that can be defined using the REST API. See <>
+to learn more about these settings.
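+
+A configuration object can also be checked on the client side before the job
+is submitted, using the `validate()` method exercised by the tests added in
+this patch. The following is a minimal sketch (not one of the documented
+snippets), assuming the `GroupConfig`, `DateHistogramGroupConfig` and
+`ValidationException` types shown above:
+
+["source","java"]
+--------------------------------------------------
+GroupConfig groups = new GroupConfig(
+    new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR));
+Optional<ValidationException> validation = groups.validate();
+if (validation.isPresent()) {
+    // every problem is reported, e.g. "Field name is required"
+    List<String> errors = validation.get().validationErrors();
+}
+--------------------------------------------------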
+
+Using the REST API, we could define this grouping configuration:
+
+[source,js]
+--------------------------------------------------
+"groups" : {
+  "date_histogram": {
+    "field": "timestamp",
+    "interval": "1h",
+    "delay": "7d",
+    "time_zone": "UTC"
+  },
+  "terms": {
+    "fields": ["hostname", "datacenter"]
+  },
+  "histogram": {
+    "fields": ["load", "net_in", "net_out"],
+    "interval": 5
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE
+
+Using the `GroupConfig` object and the high level REST client, the same
+configuration would be:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup-job-group-config]
+--------------------------------------------------
+<1> The date histogram aggregation used to roll up documents, as a `DateHistogramGroupConfig`
+<2> The terms aggregation used to roll up documents, as a `TermsGroupConfig`
+<3> The histogram aggregation used to roll up documents, as a `HistogramGroupConfig`
+<4> The grouping configuration as a `GroupConfig`
+
+
+[[java-rest-high-x-pack-rollup-put-rollup-job-metrics-config]]
+==== Metrics Configuration
+
+After defining which groups should be generated for the data, you next configure
+which metrics should be collected. The list of metrics is defined in the `RollupJobConfig`
+using a `List<MetricConfig>` instance. `MetricConfig` reflects all the configuration
+settings that can be defined using the REST API. See <>
+to learn more about these settings.
+
+Using the REST API, we could define this metrics configuration:
+
+[source,js]
+--------------------------------------------------
+"metrics": [
+    {
+        "field": "temperature",
+        "metrics": ["min", "max", "sum"]
+    },
+    {
+        "field": "voltage",
+        "metrics": ["avg", "value_count"]
+    }
+]
+--------------------------------------------------
+// NOTCONSOLE
+
+Using the `MetricConfig` object and the high level REST client, the same
+configuration would be:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup-job-metrics-config]
+--------------------------------------------------
+<1> The list of `MetricConfig` to configure in the `RollupJobConfig`
+<2> Adds the metrics to compute on the `temperature` field
+<3> Adds the metrics to compute on the `voltage` field
+
+
+[[java-rest-high-x-pack-rollup-put-rollup-job-execution]]
+==== Execution
+
+The Put Rollup Job API can be executed through a `RollupClient`
+instance. Such an instance can be retrieved from a `RestHighLevelClient`
+using the `rollup()` method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup-job-execute]
+--------------------------------------------------
+
+[[java-rest-high-x-pack-rollup-put-rollup-job-response]]
+==== Response
+
+The returned `PutRollupJobResponse` indicates if the new Rollup job
+has been successfully created:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup-job-response]
+--------------------------------------------------
+<1> `acknowledged` is a boolean indicating whether the job was successfully created
+
+[[java-rest-high-x-pack-rollup-put-rollup-job-async]]
+==== Asynchronous Execution
+
+This request can be executed asynchronously:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup-job-execute-async]
+--------------------------------------------------
+<1> The `PutRollupJobRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it has
+completed, the `ActionListener` is called back using the `onResponse` method
+if the execution completed successfully, or using the `onFailure` method if
+it failed.
+
+A typical listener for `PutRollupJobResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup-job-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of failure. The raised exception is provided as an argument
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 3619bc9e788..78a9f0bc7c2 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -262,6 +262,14 @@ The Java High Level REST Client supports the following Migration APIs:
 
 include::migration/get-assistance.asciidoc[]
 
+== Rollup APIs
+
+The Java High Level REST Client supports the following Rollup APIs:
+
+* <<java-rest-high-x-pack-rollup-put-job>>
+
+include::rollup/put_job.asciidoc[]
+
 == Security APIs
 
 The Java High Level REST Client supports the following Security APIs:
diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java
index 231e382827e..a2fda29c508 100644
--- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java
+++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java
@@ -7,9 +7,7 @@
 package org.elasticsearch.xpack.rollup.rest;
 
 import org.elasticsearch.client.node.NodeClient;
-import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestController;
 import org.elasticsearch.rest.RestRequest;
@@ -20,7 +18,6 @@ import org.elasticsearch.xpack.rollup.Rollup;
 import java.io.IOException;
 
 public class RestPutRollupJobAction extends BaseRestHandler {
-    public static final ParseField ID = new ParseField("id");
 
     public RestPutRollupJobAction(Settings settings, RestController controller) {
         super(settings);
@@ -28,13 +25,10 @@ public class RestPutRollupJobAction extends BaseRestHandler {
     }
 
     @Override
-    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
-        String id = restRequest.param(ID.getPreferredName());
-        XContentParser parser = restRequest.contentParser();
-
-        PutRollupJobAction.Request request = PutRollupJobAction.Request.fromXContent(parser, id);
-
-        return channel -> client.execute(PutRollupJobAction.INSTANCE, request, new RestToXContentListener<>(channel));
+    protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+        final String id = request.param("id");
+        final PutRollupJobAction.Request putRollupJobRequest = PutRollupJobAction.Request.fromXContent(request.contentParser(), id);
+        return channel -> client.execute(PutRollupJobAction.INSTANCE, putRollupJobRequest, new RestToXContentListener<>(channel));
     }
 
     @Override

From 6b0fc5382a41d87473a14c3aa846e10cb17ce27b Mon Sep 17 00:00:00 2001
From: David Turner
Date: Mon, 17 Sep 2018 09:22:52 +0100
Subject: [PATCH 12/27] Suppress DeadHostStateTests on Windows

---
 .../test/java/org/elasticsearch/client/DeadHostStateTests.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java
index daea27f8963..7d21f1cbe7c 100644
--- a/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/DeadHostStateTests.java
@@ -36,6 +36,7 @@ public class DeadHostStateTests extends RestClientTestCase {
 
     private static long[]
EXPECTED_TIMEOUTS_SECONDS = new long[]{60, 84, 120, 169, 240, 339, 480, 678, 960, 1357, 1800}; public void testInitialDeadHostStateDefaultTimeSupplier() { + assumeFalse("https://github.com/elastic/elasticsearch/issues/33747", System.getProperty("os.name").startsWith("Windows")); DeadHostState deadHostState = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT); long currentTime = System.nanoTime(); assertThat(deadHostState.getDeadUntilNanos(), greaterThan(currentTime)); @@ -54,6 +55,7 @@ public class DeadHostStateTests extends RestClientTestCase { } public void testCompareToDefaultTimeSupplier() { + assumeFalse("https://github.com/elastic/elasticsearch/issues/33747", System.getProperty("os.name").startsWith("Windows")); int numObjects = randomIntBetween(EXPECTED_TIMEOUTS_SECONDS.length, 30); DeadHostState[] deadHostStates = new DeadHostState[numObjects]; for (int i = 0; i < numObjects; i++) { From 896b3864a87bfcb39c937cc2ee583767b1844870 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Mon, 17 Sep 2018 12:04:57 +0300 Subject: [PATCH 13/27] Favor running compile tasks before pre-commit (#33424) --- .../gradle/precommit/PrecommitTasks.groovy | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 06557d4ccfd..8c3fb7c7e84 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -21,7 +21,6 @@ package org.elasticsearch.gradle.precommit import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask import org.gradle.api.Project import org.gradle.api.Task -import org.gradle.api.artifacts.Configuration import org.gradle.api.plugins.JavaBasePlugin import org.gradle.api.plugins.quality.Checkstyle /** @@ -70,14 +69,19 @@ class PrecommitTasks { precommitTasks.add(configureLoggerUsage(project)) } + // We want to get any compilation error before running the pre-commit checks. 
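+        // (shouldRunAfter is only a soft ordering hint: unlike a task
+        // dependency it never forces the compile tasks to run, and Gradle may
+        // ignore it if honoring it would create an ordering cycle.)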
+ project.sourceSets.all { sourceSet -> + precommitTasks.each { task -> + task.shouldRunAfter(sourceSet.getClassesTaskName()) + } + } - Map precommitOptions = [ + return project.tasks.create([ name: 'precommit', group: JavaBasePlugin.VERIFICATION_GROUP, description: 'Runs all non-test checks.', dependsOn: precommitTasks - ] - return project.tasks.create(precommitOptions) + ]) } private static Task configureJarHell(Project project) { From 1f2a90cb3937dcc6a6164e24b3a5350d69283f91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 17 Sep 2018 11:15:55 +0200 Subject: [PATCH 14/27] Mute DateTimeUnitTests.testConversion --- .../org/elasticsearch/common/rounding/DateTimeUnitTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java index f188eb4cac6..2723be86998 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/DateTimeUnitTests.java @@ -66,6 +66,7 @@ public class DateTimeUnitTests extends ESTestCase { assertEquals(SECOND_OF_MINUTE, DateTimeUnit.resolve((byte) 8)); } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/33749") public void testConversion() { long millis = randomLongBetween(0, Instant.now().toEpochMilli()); DateTimeZone zone = randomDateTimeZone(); From 5f9370f0ecbf319e5201a82e60fde59c6de003f0 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Mon, 17 Sep 2018 12:38:51 +0300 Subject: [PATCH 15/27] [TESTS] Mute SSLDriverTests in JDK11 Relates: https://github.com/elastic/elasticsearch/issues/33751 --- .../xpack/security/transport/nio/SSLDriverTests.java | 3 +++ 1 file changed, 3 insertions(+) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java index 303ed92130a..5ff154d6562 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLDriverTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.security.transport.nio; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ssl.CertParsingUtils; @@ -123,6 +124,8 @@ public class SSLDriverTests extends ESTestCase { } public void testHandshakeFailureBecauseProtocolMismatch() throws Exception { + // See https://github.com/elastic/elasticsearch/issues/33751 + assumeTrue("test fails on JDK 11 >= ea28 currently", JavaVersion.current().compareTo(JavaVersion.parse("11")) < 0); SSLContext sslContext = getSSLContext(); SSLEngine clientEngine = sslContext.createSSLEngine(); SSLEngine serverEngine = sslContext.createSSLEngine(); From b06a082725ee8ab5e3755cb7990ca3bc94ae15a0 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Mon, 17 Sep 2018 11:49:22 +0200 Subject: [PATCH 16/27] Improve reproducibility of BigArraysTests. 
Close #33750 --- .../test/java/org/elasticsearch/common/util/BigArraysTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index ba92ba47827..d0c051e03bc 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -355,6 +355,7 @@ public class BigArraysTests extends ESTestCase { HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( Settings.builder() .put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), maxSize, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BigArrays bigArrays = new BigArrays(null, hcbs, false).withCircuitBreaking(); @@ -412,6 +413,7 @@ public class BigArraysTests extends ESTestCase { HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( Settings.builder() .put(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), maxSize, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BigArrays bigArrays = new BigArrays(null, hcbs, false); From baa69a5ed50fc255c7656083741c72c9b990c3e4 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 17 Sep 2018 13:02:11 +0200 Subject: [PATCH 17/27] [Docs] Fix broken links in HLRC Rollup documentation Introduced in #33521 --- docs/java-rest/high-level/rollup/put_job.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/java-rest/high-level/rollup/put_job.asciidoc b/docs/java-rest/high-level/rollup/put_job.asciidoc index 6d8f0352ef2..b01d65cb19b 100644 --- a/docs/java-rest/high-level/rollup/put_job.asciidoc +++ b/docs/java-rest/high-level/rollup/put_job.asciidoc @@ -20,7 +20,7 @@ include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup- ==== Rollup Job Configuration The `RollupJobConfig` object contains all the details about the rollup job -configuration. See <> to learn more +configuration. See <> to learn more about the various configuration settings. A `RollupJobConfig` requires the following arguments: @@ -44,7 +44,7 @@ include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup- The grouping configuration of the Rollup job is defined in the `RollupJobConfig` using a `GroupConfig` instance. `GroupConfig` reflects all the configuration -settings that can be defined using the REST API. See <> +settings that can be defined using the REST API. See <> to learn more about these settings. Using the REST API, we could define this grouping configuration: @@ -88,7 +88,7 @@ include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup- After defining which groups should be generated for the data, you next configure which metrics should be collected. The list of metrics is defined in the `RollupJobConfig` using a `List` instance. `MetricConfig` reflects all the configuration -settings that can be defined using the REST API. See <> +settings that can be defined using the REST API. See <> to learn more about these settings. 
Using the REST API, we could define this metrics configuration: From 5ca6f312058ec8c7fbac072c4648246e7dfb4957 Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Mon, 17 Sep 2018 14:09:28 +0300 Subject: [PATCH 18/27] Move precommit task implementation to java (#33407) Replace precommit tasks that execute with Java implementations --- .../gradle/precommit/JarHellTask.groovy | 67 ------- .../gradle/precommit/LoggerUsageTask.groovy | 108 ----------- .../gradle/precommit/PrecommitTasks.groovy | 18 +- .../org/elasticsearch/gradle/LoggedExec.java | 45 ++++- .../precommit/ForbiddenApisCliTask.java | 21 +- .../gradle/precommit/JarHellTask.java | 68 +++++++ .../gradle/precommit/LoggerUsageTask.java | 87 +++++++++ .../precommit/NamingConventionsTask.java | 181 ++++++++---------- .../gradle/precommit/PrecommitTask.java | 43 +++++ test/framework/build.gradle | 1 + 10 files changed, 336 insertions(+), 303 deletions(-) delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy delete mode 100644 buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java create mode 100644 buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy deleted file mode 100644 index 119a0276499..00000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/JarHellTask.groovy +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gradle.precommit - -import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin -import org.elasticsearch.gradle.LoggedExec -import org.gradle.api.file.FileCollection -import org.gradle.api.tasks.Classpath -import org.gradle.api.tasks.OutputFile -/** - * Runs CheckJarHell on a classpath. - */ -public class JarHellTask extends LoggedExec { - - /** - * We use a simple "marker" file that we touch when the task succeeds - * as the task output. This is compared against the modified time of the - * inputs (ie the jars/class files). 
- */ - @OutputFile - File successMarker - - @Classpath - FileCollection classpath - - public JarHellTask() { - successMarker = new File(project.buildDir, 'markers/jarHell-' + getName()) - project.afterEvaluate { - FileCollection classpath = project.sourceSets.test.runtimeClasspath - if (project.plugins.hasPlugin(ShadowPlugin)) { - classpath += project.configurations.bundle - } - inputs.files(classpath) - dependsOn(classpath) - description = "Runs CheckJarHell on ${classpath}" - executable = new File(project.runtimeJavaHome, 'bin/java') - doFirst({ - /* JarHell doesn't like getting directories that don't exist but - gradle isn't especially careful about that. So we have to do it - filter it ourselves. */ - FileCollection taskClasspath = classpath.filter { it.exists() } - args('-cp', taskClasspath.asPath, 'org.elasticsearch.bootstrap.JarHell') - }) - doLast({ - successMarker.parentFile.mkdirs() - successMarker.setText("", 'UTF-8') - }) - } - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy deleted file mode 100644 index ac1e12620af..00000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LoggerUsageTask.groovy +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gradle.precommit - -import org.elasticsearch.gradle.LoggedExec -import org.gradle.api.file.FileCollection -import org.gradle.api.tasks.InputFiles -import org.gradle.api.tasks.OutputFile - -/** - * Runs LoggerUsageCheck on a set of directories. - */ -public class LoggerUsageTask extends LoggedExec { - - /** - * We use a simple "marker" file that we touch when the task succeeds - * as the task output. This is compared against the modified time of the - * inputs (ie the jars/class files). 
- */ - private File successMarker = new File(project.buildDir, 'markers/loggerUsage') - - private FileCollection classpath; - - private FileCollection classDirectories; - - public LoggerUsageTask() { - project.afterEvaluate { - dependsOn(classpath) - description = "Runs LoggerUsageCheck on ${classDirectories}" - executable = new File(project.runtimeJavaHome, 'bin/java') - if (classDirectories == null) { - // Default to main and test class files - List files = [] - // But only if the source sets that will make them exist - if (project.sourceSets.findByName("main")) { - files.addAll(project.sourceSets.main.output.classesDirs.getFiles()) - dependsOn project.tasks.classes - } - if (project.sourceSets.findByName("test")) { - files.addAll(project.sourceSets.test.output.classesDirs.getFiles()) - dependsOn project.tasks.testClasses - } - /* In an extra twist, it isn't good enough that the source set - * exists. Empty source sets won't make a classes directory - * which will cause the check to fail. We have to filter the - * empty directories out manually. This filter is done right - * before the actual logger usage check giving the rest of the - * build the opportunity to actually build the directory. - */ - classDirectories = project.files(files).filter { it.exists() } - } - doFirst({ - args('-cp', getClasspath().asPath, 'org.elasticsearch.test.loggerusage.ESLoggerUsageChecker') - getClassDirectories().each { - args it.getAbsolutePath() - } - }) - doLast({ - successMarker.parentFile.mkdirs() - successMarker.setText("", 'UTF-8') - }) - } - } - - @InputFiles - FileCollection getClasspath() { - return classpath - } - - void setClasspath(FileCollection classpath) { - this.classpath = classpath - } - - @InputFiles - FileCollection getClassDirectories() { - return classDirectories - } - - void setClassDirectories(FileCollection classDirectories) { - this.classDirectories = classDirectories - } - - @OutputFile - File getSuccessMarker() { - return successMarker - } - - void setSuccessMarker(File successMarker) { - this.successMarker = successMarker - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 8c3fb7c7e84..e89d05e8508 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -18,6 +18,7 @@ */ package org.elasticsearch.gradle.precommit +import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask import org.gradle.api.Project import org.gradle.api.Task @@ -87,6 +88,11 @@ class PrecommitTasks { private static Task configureJarHell(Project project) { Task task = project.tasks.create('jarHell', JarHellTask.class) task.classpath = project.sourceSets.test.runtimeClasspath + if (project.plugins.hasPlugin(ShadowPlugin)) { + task.classpath += project.configurations.bundle + } + task.dependsOn(project.sourceSets.test.classesTaskName) + task.javaHome = project.runtimeJavaHome return task } @@ -205,22 +211,20 @@ class PrecommitTasks { private static Task configureNamingConventions(Project project) { if (project.sourceSets.findByName("test")) { - return project.tasks.create('namingConventions', NamingConventionsTask) + Task namingConventionsTask = project.tasks.create('namingConventions', NamingConventionsTask) + namingConventionsTask.javaHome = project.runtimeJavaHome + return 
namingConventionsTask
         }
         return null
     }
 
     private static Task configureLoggerUsage(Project project) {
-        Task loggerUsageTask = project.tasks.create('loggerUsageCheck', LoggerUsageTask.class)
-
         project.configurations.create('loggerUsagePlugin')
         project.dependencies.add('loggerUsagePlugin',
                 "org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}")
-
-        loggerUsageTask.configure {
+        return project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) {
             classpath = project.configurations.loggerUsagePlugin
+            javaHome = project.runtimeJavaHome
         }
-
-        return loggerUsageTask
     }
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java
index 4eab1232ceb..644a3e7e134 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/LoggedExec.java
@@ -1,10 +1,17 @@
 package org.elasticsearch.gradle;
 
+import org.gradle.api.Action;
 import org.gradle.api.GradleException;
+import org.gradle.api.Project;
 import org.gradle.api.tasks.Exec;
+import org.gradle.process.BaseExecSpec;
+import org.gradle.process.ExecResult;
+import org.gradle.process.ExecSpec;
+import org.gradle.process.JavaExecSpec;
 
 import java.io.ByteArrayOutputStream;
 import java.io.UnsupportedEncodingException;
+import java.util.function.Function;
 
 /**
  * A wrapper around gradle's Exec task to capture output and log on error.
@@ -12,9 +19,8 @@ import java.io.UnsupportedEncodingException;
 @SuppressWarnings("unchecked")
 public class LoggedExec extends Exec {
 
-    protected ByteArrayOutputStream output = new ByteArrayOutputStream();
-
     public LoggedExec() {
+        ByteArrayOutputStream output = new ByteArrayOutputStream();
         if (getLogger().isInfoEnabled() == false) {
             setStandardOutput(output);
             setErrorOutput(output);
@@ -41,4 +47,39 @@
             );
         }
     }
+
+    public static ExecResult exec(Project project, Action<ExecSpec> action) {
+        return genericExec(project, project::exec, action);
+    }
+
+    public static ExecResult javaexec(Project project, Action<JavaExecSpec> action) {
+        return genericExec(project, project::javaexec, action);
+    }
+
+    private static <T extends BaseExecSpec> ExecResult genericExec(
+        Project project,
+        Function<Action<T>,ExecResult> function,
+        Action<T> action
+    ) {
+        if (project.getLogger().isInfoEnabled()) {
+            return function.apply(action);
+        }
+        ByteArrayOutputStream output = new ByteArrayOutputStream();
+        try {
+            return function.apply(spec -> {
+                spec.setStandardOutput(output);
+                spec.setErrorOutput(output);
+                action.execute(spec);
+            });
+        } catch (Exception e) {
+            try {
+                for (String line : output.toString("UTF-8").split("\\R")) {
+                    project.getLogger().error(line);
+                }
+            } catch (UnsupportedEncodingException ue) {
+                throw new GradleException("Failed to read exec output", ue);
+            }
+            throw e;
+        }
+    }
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java
index aaa9564b0dc..f88fff24be5 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/ForbiddenApisCliTask.java
@@ -18,7 +18,7 @@
  */
 package org.elasticsearch.gradle.precommit;
 
-import org.gradle.api.DefaultTask;
+import org.elasticsearch.gradle.LoggedExec;
 import org.gradle.api.JavaVersion;
 import org.gradle.api.artifacts.Configuration;
 import org.gradle.api.file.FileCollection;
@@ -26,22 +26,18 @@
import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; -import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.TaskAction; import org.gradle.process.JavaExecSpec; import java.io.File; -import java.io.IOException; -import java.nio.file.Files; import java.util.ArrayList; -import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Set; -public class ForbiddenApisCliTask extends DefaultTask { +public class ForbiddenApisCliTask extends PrecommitTask { private final Logger logger = Logging.getLogger(ForbiddenApisCliTask.class); private FileCollection signaturesFiles; @@ -71,14 +67,6 @@ public class ForbiddenApisCliTask extends DefaultTask { } } - @OutputFile - public File getMarkerFile() { - return new File( - new File(getProject().getBuildDir(), "precommit"), - getName() - ); - } - @InputFiles @SkipWhenEmpty public FileCollection getClassesDirs() { @@ -152,8 +140,8 @@ public class ForbiddenApisCliTask extends DefaultTask { } @TaskAction - public void runForbiddenApisAndWriteMarker() throws IOException { - getProject().javaexec((JavaExecSpec spec) -> { + public void runForbiddenApisAndWriteMarker() { + LoggedExec.javaexec(getProject(), (JavaExecSpec spec) -> { spec.classpath( getForbiddenAPIsConfiguration(), getClassPathFromSourceSet() @@ -184,7 +172,6 @@ public class ForbiddenApisCliTask extends DefaultTask { spec.args("-d", dir) ); }); - Files.write(getMarkerFile().toPath(), Collections.emptyList()); } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java new file mode 100644 index 00000000000..fd5b0c57907 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/JarHellTask.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.precommit; + +import org.elasticsearch.gradle.LoggedExec; +import org.gradle.api.file.FileCollection; +import org.gradle.api.tasks.Classpath; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.TaskAction; + +/** + * Runs CheckJarHell on a classpath. 
+ */ +public class JarHellTask extends PrecommitTask { + + private FileCollection classpath; + + private Object javaHome; + + public JarHellTask() { + setDescription("Runs CheckJarHell on the configured classpath"); + } + + @TaskAction + public void runJarHellCheck() { + LoggedExec.javaexec(getProject(), spec -> { + spec.classpath(getClasspath()); + spec.executable(getJavaHome() + "/bin/java"); + spec.setMain("org.elasticsearch.bootstrap.JarHell"); + }); + } + + @Input + public Object getJavaHome() { + return javaHome; + } + + public void setJavaHome(Object javaHome) { + this.javaHome = javaHome; + } + + @Classpath + public FileCollection getClasspath() { + return classpath.filter(file -> file.exists()); + } + + public void setClasspath(FileCollection classpath) { + this.classpath = classpath; + } + +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java new file mode 100644 index 00000000000..fb1831bda4d --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LoggerUsageTask.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.gradle.precommit; + +import org.elasticsearch.gradle.LoggedExec; +import org.gradle.api.file.FileCollection; +import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.tasks.Classpath; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.SkipWhenEmpty; +import org.gradle.api.tasks.TaskAction; + +import java.io.File; + +/** + * Runs LoggerUsageCheck on a set of directories. 
+ */ +public class LoggerUsageTask extends PrecommitTask { + + public LoggerUsageTask() { + setDescription("Runs LoggerUsageCheck on output directories of all source sets"); + getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().all(sourceSet -> { + dependsOn(sourceSet.getClassesTaskName()); + }); + } + + @TaskAction + public void runLoggerUsageTask() { + LoggedExec.javaexec(getProject(), spec -> { + spec.setMain("org.elasticsearch.test.loggerusage.ESLoggerUsageChecker"); + spec.classpath(getClasspath()); + spec.executable(getJavaHome() + "/bin/java"); + getClassDirectories().forEach(spec::args); + }); + } + + @Classpath + public FileCollection getClasspath() { + return classpath; + } + + public void setClasspath(FileCollection classpath) { + this.classpath = classpath; + } + + @InputFiles + @SkipWhenEmpty + public FileCollection getClassDirectories() { + return getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets().stream() + // Don't pick up all source sets like the java9 ones as logger-check doesn't support the class format + .filter(sourceSet -> sourceSet.getName().equals("main") || sourceSet.getName().equals("test")) + .map(sourceSet -> sourceSet.getOutput().getClassesDirs()) + .reduce(FileCollection::plus) + .orElse(getProject().files()) + .filter(File::exists); + } + + @Input + public Object getJavaHome() { + return javaHome; + } + + public void setJavaHome(Object javaHome) { + this.javaHome = javaHome; + } + + private FileCollection classpath; + private Object javaHome; +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/NamingConventionsTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/NamingConventionsTask.java index 297586e9ac6..b0e36982918 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/NamingConventionsTask.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/NamingConventionsTask.java @@ -1,24 +1,20 @@ package org.elasticsearch.gradle.precommit; -import groovy.lang.Closure; import org.elasticsearch.gradle.LoggedExec; import org.elasticsearch.test.NamingConventionsCheck; import org.gradle.api.GradleException; -import org.gradle.api.Project; -import org.gradle.api.Task; import org.gradle.api.file.FileCollection; -import org.gradle.api.plugins.ExtraPropertiesExtension; import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.tasks.Classpath; import org.gradle.api.tasks.Input; -import org.gradle.api.tasks.OutputFile; +import org.gradle.api.tasks.InputFiles; +import org.gradle.api.tasks.SkipWhenEmpty; import org.gradle.api.tasks.SourceSetContainer; +import org.gradle.api.tasks.TaskAction; import java.io.File; -import java.io.FileWriter; -import java.io.IOException; import java.net.URISyntaxException; import java.net.URL; -import java.util.Objects; /** * Runs NamingConventionsCheck on a classpath/directory combo to verify that @@ -26,102 +22,83 @@ import java.util.Objects; * gradle. Read the Javadoc for NamingConventionsCheck to learn more. */ @SuppressWarnings("unchecked") -public class NamingConventionsTask extends LoggedExec { +public class NamingConventionsTask extends PrecommitTask { + public NamingConventionsTask() { setDescription("Tests that test classes aren't misnamed or misplaced"); - final Project project = getProject(); + dependsOn(getJavaSourceSets().getByName(checkForTestsInMain ? 
"main" : "test").getClassesTaskName()); + } - SourceSetContainer sourceSets = getJavaSourceSets(); - final FileCollection classpath; - try { - URL location = NamingConventionsCheck.class.getProtectionDomain().getCodeSource().getLocation(); - if (location.getProtocol().equals("file") == false) { - throw new GradleException("Unexpected location for NamingConventionCheck class: "+ location); - } - classpath = project.files( - // This works because the class only depends on one class from junit that will be available from the - // tests compile classpath. It's the most straight forward way of telling Java where to find the main - // class. - location.toURI().getPath(), - // the tests to be loaded - checkForTestsInMain ? sourceSets.getByName("main").getRuntimeClasspath() : project.files(), - sourceSets.getByName("test").getCompileClasspath(), - sourceSets.getByName("test").getOutput() + @TaskAction + public void runNamingConventions() { + LoggedExec.javaexec(getProject(), spec -> { + spec.classpath( + getNamingConventionsCheckClassFiles(), + getSourceSetClassPath() ); + spec.executable(getJavaHome() + "/bin/java"); + spec.jvmArgs("-Djna.nosys=true"); + spec.setMain(NamingConventionsCheck.class.getName()); + spec.args("--test-class", getTestClass()); + if (isSkipIntegTestInDisguise()) { + spec.args("--skip-integ-tests-in-disguise"); + } else { + spec.args("--integ-test-class", getIntegTestClass()); + } + if (isCheckForTestsInMain()) { + spec.args("--main"); + spec.args("--"); + } else { + spec.args("--"); + } + spec.args(getExistingClassesDirs().getAsPath()); + }); + } + + @Input + public Object getJavaHome() { + return javaHome; + } + + public void setJavaHome(Object javaHome) { + this.javaHome = javaHome; + } + + @Classpath + public FileCollection getSourceSetClassPath() { + SourceSetContainer sourceSets = getJavaSourceSets(); + return getProject().files( + sourceSets.getByName("test").getCompileClasspath(), + sourceSets.getByName("test").getOutput(), + checkForTestsInMain ? sourceSets.getByName("main").getRuntimeClasspath() : getProject().files() + ); + } + + @InputFiles + public File getNamingConventionsCheckClassFiles() { + // This works because the class only depends on one class from junit that will be available from the + // tests compile classpath. It's the most straight forward way of telling Java where to find the main + // class. + URL location = NamingConventionsCheck.class.getProtectionDomain().getCodeSource().getLocation(); + if (location.getProtocol().equals("file") == false) { + throw new GradleException("Unexpected location for NamingConventionCheck class: "+ location); + } + try { + return new File(location.toURI().getPath()); } catch (URISyntaxException e) { throw new AssertionError(e); } - dependsOn(project.getTasks().matching(it -> "testCompileClasspath".equals(it.getName()))); - getInputs().files(classpath); - - setExecutable(new File( - Objects.requireNonNull( - project.getExtensions().getByType(ExtraPropertiesExtension.class).get("runtimeJavaHome") - ).toString(), - "bin/java") - ); - - if (checkForTestsInMain == false) { - /* This task is created by default for all subprojects with this - * setting and there is no point in running it if the files don't - * exist. */ - onlyIf((unused) -> getExistingClassesDirs().isEmpty() == false); - } - - /* - * We build the arguments in a funny afterEvaluate/doFirst closure so that we can wait for the classpath to be - * ready for us. Strangely neither one on their own are good enough. 
- */ - project.afterEvaluate(new Closure(this, this) { - public void doCall(Project it) { - doFirst(unused -> { - args("-Djna.nosys=true"); - args("-cp", classpath.getAsPath(), "org.elasticsearch.test.NamingConventionsCheck"); - args("--test-class", getTestClass()); - if (skipIntegTestInDisguise) { - args("--skip-integ-tests-in-disguise"); - } else { - args("--integ-test-class", getIntegTestClass()); - } - if (getCheckForTestsInMain()) { - args("--main"); - args("--"); - } else { - args("--"); - } - args(getExistingClassesDirs().getAsPath()); - }); - } - }); - doLast((Task it) -> { - try { - try (FileWriter fw = new FileWriter(getSuccessMarker())) { - fw.write(""); - } - } catch (IOException e) { - throw new GradleException("io exception", e); - } - }); - } - - private SourceSetContainer getJavaSourceSets() { - return getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets(); } + @InputFiles + @SkipWhenEmpty public FileCollection getExistingClassesDirs() { FileCollection classesDirs = getJavaSourceSets().getByName(checkForTestsInMain ? "main" : "test") .getOutput().getClassesDirs(); return classesDirs.filter(it -> it.exists()); } - public File getSuccessMarker() { - return successMarker; - } - - public void setSuccessMarker(File successMarker) { - this.successMarker = successMarker; - } - + @Input public boolean isSkipIntegTestInDisguise() { return skipIntegTestInDisguise; } @@ -130,6 +107,7 @@ public class NamingConventionsTask extends LoggedExec { this.skipIntegTestInDisguise = skipIntegTestInDisguise; } + @Input public String getTestClass() { return testClass; } @@ -138,6 +116,7 @@ public class NamingConventionsTask extends LoggedExec { this.testClass = testClass; } + @Input public String getIntegTestClass() { return integTestClass; } @@ -146,10 +125,7 @@ public class NamingConventionsTask extends LoggedExec { this.integTestClass = integTestClass; } - public boolean getCheckForTestsInMain() { - return checkForTestsInMain; - } - + @Input public boolean isCheckForTestsInMain() { return checkForTestsInMain; } @@ -158,33 +134,34 @@ public class NamingConventionsTask extends LoggedExec { this.checkForTestsInMain = checkForTestsInMain; } + private SourceSetContainer getJavaSourceSets() { + return getProject().getConvention().getPlugin(JavaPluginConvention.class).getSourceSets(); + } + /** - * We use a simple "marker" file that we touch when the task succeeds - * as the task output. This is compared against the modified time of the - * inputs (ie the jars/class files). + * The java home to run the check with */ - @OutputFile - private File successMarker = new File(getProject().getBuildDir(), "markers/" + this.getName()); + private Object javaHome; // Make it an Object to allow for Groovy GString + /** * Should we skip the integ tests in disguise tests? Defaults to true because only core names its * integ tests correctly. */ - @Input private boolean skipIntegTestInDisguise = false; + /** * Superclass for all tests. */ - @Input private String testClass = "org.apache.lucene.util.LuceneTestCase"; + /** * Superclass for all integration tests. */ - @Input private String integTestClass = "org.elasticsearch.test.ESIntegTestCase"; + /** * Should the test also check the main classpath for test classes instead of * doing the usual checks to the test classpath. 
 */
-    @Input
     private boolean checkForTestsInMain = false;
 }
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java
new file mode 100644
index 00000000000..6f99e901ec4
--- /dev/null
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/PrecommitTask.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gradle.precommit;
+
+import org.gradle.api.DefaultTask;
+import org.gradle.api.tasks.OutputFile;
+import org.gradle.api.tasks.TaskAction;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.StandardOpenOption;
+
+public class PrecommitTask extends DefaultTask {
+
+    @OutputFile
+    public File getSuccessMarker() {
+        return new File(getProject().getBuildDir(), "markers/" + this.getName());
+    }
+
+    @TaskAction
+    public void writeMarker() throws IOException {
+        getSuccessMarker().getParentFile().mkdirs();
+        Files.write(getSuccessMarker().toPath(), new byte[]{}, StandardOpenOption.CREATE);
+    }
+
+}
diff --git a/test/framework/build.gradle b/test/framework/build.gradle
index 8179e3d096a..5e5c53f4406 100644
--- a/test/framework/build.gradle
+++ b/test/framework/build.gradle
@@ -64,6 +64,7 @@ thirdPartyAudit.excludes = [
 
 task namingConventionsMain(type: org.elasticsearch.gradle.precommit.NamingConventionsTask) {
   checkForTestsInMain = true
+  javaHome = project.runtimeJavaHome
 }
 precommit.dependsOn namingConventionsMain

From 14d57c111524cd0504f058d07599b8c9e7359557 Mon Sep 17 00:00:00 2001
From: Bukhtawar
Date: Mon, 17 Sep 2018 16:43:44 +0530
Subject: [PATCH 19/27] Skip rebalancing when cluster_concurrent_rebalance threshold reached (#33329)

Allows skipping shard balancing when the cluster_concurrent_rebalance
threshold is already reached, which cuts down the time spent in the
rebalance method of BalancedShardsAllocator.
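For orientation (not part of the patch itself), a minimal sketch of how a custom decider could use the new cluster-level hook. The class name and decider label are invented, and the constructor assumes the 6.x-era `AllocationDecider(Settings)` API visible in the hunk below:

```java
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.settings.Settings;

// Hypothetical decider that vetoes rebalancing once per allocation round.
// Because the answer does not depend on any particular shard, returning NO
// here lets BalancedShardsAllocator skip its whole rebalance pass instead
// of asking the same question once per shard.
public class NoRebalanceDecider extends AllocationDecider {

    public NoRebalanceDecider(Settings settings) {
        super(settings);
    }

    @Override
    public Decision canRebalance(RoutingAllocation allocation) {
        return allocation.decision(Decision.NO, "no_rebalance", "rebalancing is disabled");
    }
}
```

The real decider below takes the same route conditionally: the per-shard overload now delegates to a shared cluster-level check, so both paths return the same answer once the threshold is reached.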
--- .../decider/ConcurrentRebalanceAllocationDecider.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 05351109c86..15456ec3e11 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -61,6 +61,11 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { @Override public Decision canRebalance(ShardRouting shardRouting, RoutingAllocation allocation) { + return canRebalance(allocation); + } + + @Override + public Decision canRebalance(RoutingAllocation allocation) { if (clusterConcurrentRebalance == -1) { return allocation.decision(Decision.YES, NAME, "unlimited concurrent rebalances are allowed"); } From b2413d2068f732353184e4fa378f8f20eb56886b Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 17 Sep 2018 13:37:39 +0200 Subject: [PATCH 20/27] [Docs] Fix broken external links in HLRC Rollup documentation Another attempt. Introduced in #33521 --- docs/java-rest/high-level/rollup/put_job.asciidoc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/java-rest/high-level/rollup/put_job.asciidoc b/docs/java-rest/high-level/rollup/put_job.asciidoc index b01d65cb19b..0b7ece05ca8 100644 --- a/docs/java-rest/high-level/rollup/put_job.asciidoc +++ b/docs/java-rest/high-level/rollup/put_job.asciidoc @@ -20,7 +20,7 @@ include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup- ==== Rollup Job Configuration The `RollupJobConfig` object contains all the details about the rollup job -configuration. See <> to learn more +configuration. See {ref}/rollup-job-config.html[Rollup configuration] to learn more about the various configuration settings. A `RollupJobConfig` requires the following arguments: @@ -44,7 +44,7 @@ include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup- The grouping configuration of the Rollup job is defined in the `RollupJobConfig` using a `GroupConfig` instance. `GroupConfig` reflects all the configuration -settings that can be defined using the REST API. See <> +settings that can be defined using the REST API. See {ref}/rollup-job-config.html#rollup-groups-config[Grouping Config] to learn more about these settings. Using the REST API, we could define this grouping configuration: @@ -88,7 +88,7 @@ include-tagged::{doc-tests}/RollupDocumentationIT.java[x-pack-rollup-put-rollup- After defining which groups should be generated for the data, you next configure which metrics should be collected. The list of metrics is defined in the `RollupJobConfig` using a `List` instance. `MetricConfig` reflects all the configuration -settings that can be defined using the REST API. See <> +settings that can be defined using the REST API. See {ref}/rollup-job-config.html#rollup-metrics-config[Metrics Config] to learn more about these settings. 
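To give a rough sense of the Java side, here is an illustrative sketch (not taken from this documentation; the field names are invented, and it assumes the `org.elasticsearch.client.rollup.job.config.MetricConfig(String field, List<String> metrics)` constructor shape of the 6.x high-level REST client):

```java
import java.util.Arrays;
import java.util.List;

import org.elasticsearch.client.rollup.job.config.MetricConfig;

public class RollupMetricsExample {
    // Hypothetical metrics: min/max/sum for "temperature", plus avg and
    // value_count for "voltage" (both field names invented for illustration).
    static List<MetricConfig> metrics() {
        return Arrays.asList(
            new MetricConfig("temperature", Arrays.asList("min", "max", "sum")),
            new MetricConfig("voltage", Arrays.asList("avg", "value_count")));
    }
}
```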
Using the REST API, we could define this metrics configuration: From 2d81fc38732f51c480684af1084688c863f95368 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 17 Sep 2018 09:59:22 -0400 Subject: [PATCH 21/27] Keep CCR REST API specification with all of X-Pack (#33743) This commit moves the CCR REST API specification out of the CCR sub-project to locate them with the rest of the REST API specifications for X-Pack. --- x-pack/plugin/ccr/qa/build.gradle | 10 ++++++++++ .../rest-api-spec/api/ccr.create_and_follow_index.json | 0 .../api/ccr.delete_auto_follow_pattern.json | 0 .../resources/rest-api-spec/api/ccr.follow_index.json | 0 .../rest-api-spec/api/ccr.put_auto_follow_pattern.json | 0 .../test/resources/rest-api-spec/api/ccr.stats.json | 0 .../rest-api-spec/api/ccr.unfollow_index.json | 0 7 files changed, 10 insertions(+) rename x-pack/plugin/{ccr/qa/rest => }/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json (100%) rename x-pack/plugin/{ccr/qa/rest => }/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json (100%) rename x-pack/plugin/{ccr/qa/rest => }/src/test/resources/rest-api-spec/api/ccr.follow_index.json (100%) rename x-pack/plugin/{ccr/qa/rest => }/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json (100%) rename x-pack/plugin/{ccr/qa/rest => }/src/test/resources/rest-api-spec/api/ccr.stats.json (100%) rename x-pack/plugin/{ccr/qa/rest => }/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json (100%) diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle index ef03d968209..dc44f8f753d 100644 --- a/x-pack/plugin/ccr/qa/build.gradle +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -1,3 +1,13 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +subprojects { + project.tasks.withType(RestIntegTestTask) { + final File xPackResources = new File(xpackProject('plugin').projectDir, 'src/test/resources') + project.copyRestSpec.from(xPackResources) { + include 'rest-api-spec/api/**' + } + } +} /* Remove assemble on all qa projects because we don't need to publish * artifacts for them. 
 */
diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json
similarity index 100%
rename from x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json
rename to x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json
diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json
similarity index 100%
rename from x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json
rename to x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.delete_auto_follow_pattern.json
diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.follow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json
similarity index 100%
rename from x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.follow_index.json
rename to x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json
diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json
similarity index 100%
rename from x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json
rename to x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.put_auto_follow_pattern.json
diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json
similarity index 100%
rename from x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.stats.json
rename to x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json
diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json
similarity index 100%
rename from x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json
rename to x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json

From a654f21599907266d61253c163354fe132326558 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 17 Sep 2018 16:38:44 +0200
Subject: [PATCH 22/27] TESTS: Fix Concurrent Remote Connection Updates (#33707)

* Same fix idea as in #10666a4 to prevent background threads trying to
reconnect after the tests are done from throwing
`ExecutionCancelledException` and breaking the test

* Closes #30714
---
 .../RemoteClusterConnectionTests.java | 25 ++++++++++++++++---
 1 file changed, 21 insertions(+), 4 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java
index 88b01c66898..f5d23c4f3f8 100644
--- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java
+++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java
@@ -389,10 +389,27 @@ public class RemoteClusterConnectionTests extends ESTestCase {
             throws Exception {
         CountDownLatch latch = new CountDownLatch(1);
         AtomicReference<Exception> exceptionAtomicReference = new AtomicReference<>();
-        ActionListener<Void> listener = ActionListener.wrap(x -> latch.countDown(), x -> {
-            exceptionAtomicReference.set(x);
-            latch.countDown();
-        });
+        ActionListener<Void> listener = ActionListener.wrap(
+            x -> latch.countDown(),
+            x -> {
+                /*
+                 * This can occur on a thread submitted to the thread pool while we are closing the
+                 * remote cluster connection at the end of the test.
+                 */
+                if (x instanceof CancellableThreads.ExecutionCancelledException) {
+                    try {
+                        // we should already be shutting down
+                        assertEquals(0L, latch.getCount());
+                    } finally {
+                        // ensure we count down the latch on failure as well to not prevent failing tests from ending
+                        latch.countDown();
+                    }
+                    return;
+                }
+                exceptionAtomicReference.set(x);
+                latch.countDown();
+            }
+        );
         connection.updateSeedNodes(proxyAddress, seedNodes, listener);
         latch.await();
         if (exceptionAtomicReference.get() != null) {

From 7f6c13037c95f151882fe20f064177ea76ce833e Mon Sep 17 00:00:00 2001
From: David Turner
Date: Mon, 17 Sep 2018 16:23:55 +0100
Subject: [PATCH 23/27] Mark testMonitoringService as @AwaitsFix

---
 .../elasticsearch/xpack/monitoring/integration/MonitoringIT.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java
index e062ea96de3..77a70f5da57 100644
--- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java
+++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/integration/MonitoringIT.java
@@ -194,6 +194,7 @@ public class MonitoringIT extends ESSingleNodeTestCase {
      * This test waits for the monitoring service to collect monitoring documents and then checks that all expected documents
      * have been indexed with the expected information.
      */
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29880")
     public void testMonitoringService() throws Exception {
         final boolean createAPMIndex = randomBoolean();
         final String indexName = createAPMIndex ?
"apm-2017.11.06" : "books"; From 4d0bea705cd10fcfbec3e70d38712ae35eba5b9d Mon Sep 17 00:00:00 2001 From: Vladimir Dolzhenko Date: Mon, 17 Sep 2018 17:56:47 +0200 Subject: [PATCH 24/27] Do not report negative free bytes for DiskThresholdDecider#canAllocate (#33641) Do not report negative free bytes for DiskThresholdDecider#canAllocate (#33641) Closes #33596 --- .../decider/DiskThresholdDecider.java | 30 +++++----- .../DiskThresholdDeciderUnitTests.java | 59 ++++++++++++++++++- .../common/unit/ByteSizeUnitTests.java | 10 ++++ 3 files changed, 84 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index ad30dc49a55..a7426d3e551 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -121,6 +121,7 @@ public class DiskThresholdDecider extends AllocationDecider { // Cache the used disk percentage for displaying disk percentages consistent with documentation double usedDiskPercentage = usage.getUsedDiskAsPercentage(); long freeBytes = usage.getFreeBytes(); + ByteSizeValue freeBytesValue = new ByteSizeValue(freeBytes); if (logger.isTraceEnabled()) { logger.trace("node [{}] has {}% used disk", node.nodeId(), usedDiskPercentage); } @@ -134,22 +135,22 @@ public class DiskThresholdDecider extends AllocationDecider { if (freeBytes < diskThresholdSettings.getFreeBytesThresholdLow().getBytes()) { if (skipLowTresholdChecks == false) { if (logger.isDebugEnabled()) { - logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, preventing allocation", - diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId()); + logger.debug("less than the required {} free bytes threshold ({} free) on node {}, preventing allocation", + diskThresholdSettings.getFreeBytesThresholdLow(), freeBytesValue, node.nodeId()); } return allocation.decision(Decision.NO, NAME, "the node is above the low watermark cluster setting [%s=%s], having less than the minimum required [%s] free " + "space, actual free: [%s]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getLowWatermarkRaw(), - diskThresholdSettings.getFreeBytesThresholdLow(), new ByteSizeValue(freeBytes)); + diskThresholdSettings.getFreeBytesThresholdLow(), freeBytesValue); } else if (freeBytes > diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) { // Allow the shard to be allocated because it is primary that // has never been allocated if it's under the high watermark if (logger.isDebugEnabled()) { - logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, " + + logger.debug("less than the required {} free bytes threshold ({} free) on node {}, " + "but allowing allocation because primary has never been allocated", - diskThresholdSettings.getFreeBytesThresholdLow(), freeBytes, node.nodeId()); + diskThresholdSettings.getFreeBytesThresholdLow(), freeBytesValue, node.nodeId()); } return allocation.decision(Decision.YES, NAME, "the node is above the low watermark, but less than the high watermark, and this primary shard has " + @@ -158,16 +159,16 @@ public class DiskThresholdDecider extends AllocationDecider { // Even though the primary has never been allocated, the node is // above the high watermark, 
so don't allow allocating the shard if (logger.isDebugEnabled()) { - logger.debug("less than the required {} free bytes threshold ({} bytes free) on node {}, " + + logger.debug("less than the required {} free bytes threshold ({} free) on node {}, " + "preventing allocation even though primary has never been allocated", - diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytes, node.nodeId()); + diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesValue, node.nodeId()); } return allocation.decision(Decision.NO, NAME, "the node is above the high watermark cluster setting [%s=%s], having less than the minimum required [%s] free " + "space, actual free: [%s]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(), - diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytes)); + diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesValue); } } @@ -219,15 +220,16 @@ public class DiskThresholdDecider extends AllocationDecider { double freeSpaceAfterShard = freeDiskPercentageAfterShardAssigned(usage, shardSize); long freeBytesAfterShard = freeBytes - shardSize; if (freeBytesAfterShard < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) { - logger.warn("after allocating, node [{}] would have less than the required " + - "{} free bytes threshold ({} bytes free), preventing allocation", - node.nodeId(), diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesAfterShard); + logger.warn("after allocating, node [{}] would have less than the required threshold of " + + "{} free (currently {} free, estimated shard size is {}), preventing allocation", + node.nodeId(), diskThresholdSettings.getFreeBytesThresholdHigh(), freeBytesValue, new ByteSizeValue(shardSize)); return allocation.decision(Decision.NO, NAME, "allocating the shard to this node will bring the node above the high watermark cluster setting [%s=%s] " + - "and cause it to have less than the minimum required [%s] of free space (free bytes after shard added: [%s])", + "and cause it to have less than the minimum required [%s] of free space (free: [%s], estimated shard size: [%s])", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), diskThresholdSettings.getHighWatermarkRaw(), - diskThresholdSettings.getFreeBytesThresholdHigh(), new ByteSizeValue(freeBytesAfterShard)); + diskThresholdSettings.getFreeBytesThresholdHigh(), + freeBytesValue, new ByteSizeValue(shardSize)); } if (freeSpaceAfterShard < diskThresholdSettings.getFreeDiskThresholdHigh()) { logger.warn("after allocating, node [{}] would have more than the allowed " + @@ -243,7 +245,7 @@ public class DiskThresholdDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "enough disk for shard on node, free: [%s], shard size: [%s], free after allocating shard: [%s]", - new ByteSizeValue(freeBytes), + freeBytesValue, new ByteSizeValue(shardSize), new ByteSizeValue(freeBytesAfterShard)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index ce53c14807c..ec61439ee14 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -21,6 +21,7 @@ package 
org.elasticsearch.cluster.routing.allocation.decider;
 
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterInfo;
+import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.DiskUsage;
 import org.elasticsearch.cluster.ESAllocationTestCase;
@@ -79,7 +80,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
             .addAsNew(metaData.index("test"))
             .build();
 
-        ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
+        ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+            .metaData(metaData).routingTable(routingTable).build();
 
         clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
             .add(node_0)
             .add(node_1)
@@ -110,6 +112,61 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
                 "disk space than the maximum allowed [90.0%]"));
     }
 
+    public void testCannotAllocateDueToLackOfDiskResources() {
+        ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss);
+
+        MetaData metaData = MetaData.builder()
+            .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1))
+            .build();
+
+        final Index index = metaData.index("test").getIndex();
+
+        ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), true, EmptyStoreRecoverySource.INSTANCE,
+            new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+        DiscoveryNode node_0 = new DiscoveryNode("node_0", buildNewFakeTransportAddress(), Collections.emptyMap(),
+            new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
+        DiscoveryNode node_1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Collections.emptyMap(),
+            new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
+
+        RoutingTable routingTable = RoutingTable.builder()
+            .addAsNew(metaData.index("test"))
+            .build();
+
+        ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+            .metaData(metaData).routingTable(routingTable).build();
+
+        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
+            .add(node_0)
+            .add(node_1)
+        ).build();
+
+        // actual test -- after all that bloat :)
+
+        ImmutableOpenMap.Builder<String, DiskUsage> leastAvailableUsages = ImmutableOpenMap.builder();
+        leastAvailableUsages.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, 0)); // all full
+        ImmutableOpenMap.Builder<String, DiskUsage> mostAvailableUsage = ImmutableOpenMap.builder();
+        final int freeBytes = randomIntBetween(20, 100);
+        mostAvailableUsage.put("node_0", new DiskUsage("node_0", "node_0", "_na_", 100, freeBytes));
+
+        ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
+        // way bigger than available space
+        final long shardSize = randomIntBetween(110, 1000);
+        shardSizes.put("[test][0][p]", shardSize);
+        ClusterInfo clusterInfo = new ClusterInfo(leastAvailableUsages.build(), mostAvailableUsage.build(), shardSizes.build(), ImmutableOpenMap.of());
+        RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Settings.EMPTY, Collections.singleton(decider)),
+            clusterState.getRoutingNodes(), clusterState, clusterInfo, System.nanoTime());
+        allocation.debugDecision(true);
+        Decision decision = decider.canAllocate(test_0, new RoutingNode("node_0", node_0), allocation);
+        assertEquals(Decision.Type.NO, decision.type());
+
+        assertThat(decision.getExplanation(), containsString(
+            "allocating the shard to this node will bring the node above the high watermark cluster setting " +
+                "[cluster.routing.allocation.disk.watermark.high=90%] " +
+                "and cause it to have less than the minimum required [0b] of free space " +
+                "(free: [" + freeBytes + "b], estimated shard size: [" + shardSize + "b])"));
+    }
+
     public void testCanRemainUsesLeastAvailableSpace() {
         ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
         DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss);
diff --git a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
index 719313d1c86..1b1ff90aa9e 100644
--- a/server/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
+++ b/server/src/test/java/org/elasticsearch/common/unit/ByteSizeUnitTests.java
@@ -81,6 +81,16 @@ public class ByteSizeUnitTests extends ESTestCase {
         assertThat(PB.toPB(1), equalTo(1L));
     }
 
+    public void testToString() {
+        int v = randomIntBetween(1, 1023);
+        assertThat(new ByteSizeValue(PB.toBytes(v)).toString(), equalTo(v + "pb"));
+        assertThat(new ByteSizeValue(TB.toBytes(v)).toString(), equalTo(v + "tb"));
+        assertThat(new ByteSizeValue(GB.toBytes(v)).toString(), equalTo(v + "gb"));
+        assertThat(new ByteSizeValue(MB.toBytes(v)).toString(), equalTo(v + "mb"));
+        assertThat(new ByteSizeValue(KB.toBytes(v)).toString(), equalTo(v + "kb"));
+        assertThat(new ByteSizeValue(BYTES.toBytes(v)).toString(), equalTo(v + "b"));
+    }
+
     public void testSerialization() throws IOException {
         for (ByteSizeUnit unit : ByteSizeUnit.values()) {
             try (BytesStreamOutput out = new BytesStreamOutput()) {

From 5d2a01dcc3aa36e95d5a5b187bbfef755d53622b Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Mon, 17 Sep 2018 18:00:16 +0200
Subject: [PATCH 25/27] [CCR] Fail with a good error if a follow index does not have ccr metadata (#33761)

instead of an NPE.
---
 .../action/TransportFollowIndexAction.java    | 18 ++++----
 .../TransportFollowIndexActionTests.java      | 43 ++++++++++++-------
 2 files changed, 37 insertions(+), 24 deletions(-)

diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java
index fc0b68db25b..eccda262636 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexAction.java
@@ -175,7 +175,8 @@ public class TransportFollowIndexAction extends HandledTransportAction ccrIndexMetadata = followIndexMetadata.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY);
+        String[] recordedLeaderShardHistoryUUIDs = extractIndexShardHistoryUUIDs(ccrIndexMetadata);
         String recordedLeaderShardHistoryUUID = recordedLeaderShardHistoryUUIDs[shardId];
 
         ShardFollowTask shardFollowTask = new ShardFollowTask(
@@ -245,16 +246,18 @@ public class TransportFollowIndexAction extends HandledTransportAction ccrIndexMetadata = followIndex.getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY);
+        if (ccrIndexMetadata == null) {
+            throw new IllegalArgumentException("follow index [" + followIndex.getIndex().getName() + "] does not have ccr metadata");
+        }
         String leaderIndexUUID = leaderIndex.getIndex().getUUID();
-        String recordedLeaderIndexUUID = followIndex
-            .getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY)
-            .get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY);
+        String recordedLeaderIndexUUID = ccrIndexMetadata.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY);
         if (leaderIndexUUID.equals(recordedLeaderIndexUUID) == false) {
             throw new IllegalArgumentException("follow index [" + request.getFollowerIndex() + "] should reference [" +
                 leaderIndexUUID + "] as leader index but instead reference [" + recordedLeaderIndexUUID + "] as leader index");
         }
 
-        String[] recordedHistoryUUIDs = extractIndexShardHistoryUUIDs(followIndex);
+        String[] recordedHistoryUUIDs = extractIndexShardHistoryUUIDs(ccrIndexMetadata);
         assert recordedHistoryUUIDs.length == leaderIndexHistoryUUID.length;
         for (int i = 0; i < leaderIndexHistoryUUID.length; i++) {
             String recordedLeaderIndexHistoryUUID = recordedHistoryUUIDs[i];
@@ -296,9 +299,8 @@ public class TransportFollowIndexAction extends HandledTransportAction ccrIndexMetaData) {
+        String historyUUIDs = ccrIndexMetaData.get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS);
         return historyUUIDs.split(",");
     }
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexActionTests.java
index f168bccc8ca..8d4704566fd 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexActionTests.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowIndexActionTests.java
@@ -49,9 +49,16 @@ public class TransportFollowIndexActionTests extends ESTestCase {
                 () -> validate(request, leaderIMD, null, null, null));
             assertThat(e.getMessage(), equalTo("follow index [index2] does not exist"));
         }
+        {
+            IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, null);
+            IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY, null);
+            Exception e = expectThrows(IllegalArgumentException.class,
+                () -> validate(request, leaderIMD, followIMD, UUIDs, null));
+            assertThat(e.getMessage(), equalTo("follow index [index2] does not have ccr metadata"));
+        }
         {
             // should fail because the recorded leader index uuid is not equal to the leader actual index
-            IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, customMetaData);
+            IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, null);
             IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY,
                 singletonMap(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY, "another-value"));
             Exception e = expectThrows(IllegalArgumentException.class,
@@ -61,7 +68,7 @@ public class TransportFollowIndexActionTests extends ESTestCase {
         }
         {
            // should fail because the recorded leader index history uuid is not equal to the leader actual index history uuid:
-            IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, emptyMap());
+            IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, null);
             Map<String, String> anotherCustomMetaData = new HashMap<>();
             anotherCustomMetaData.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY, "_na_");
             anotherCustomMetaData.put(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_SHARD_HISTORY_UUIDS, "another-uuid");
@@ -73,7 +80,7 @@ public class TransportFollowIndexActionTests extends ESTestCase {
         }
         {
             // should fail because leader index does not have soft deletes enabled
-            IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, emptyMap());
+            IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY, null);
             IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY, customMetaData);
             Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null));
             assertThat(e.getMessage(), equalTo("leader index [index1] does not have soft deletes enabled"));
@@ -81,7 +88,7 @@ public class TransportFollowIndexActionTests extends ESTestCase {
         {
             // should fail because the number of primary shards between leader and follow index are not equal
             IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder()
-                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap());
+                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
             IndexMetaData followIMD = createIMD("index2", 4, Settings.EMPTY, customMetaData);
             Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null));
             assertThat(e.getMessage(),
@@ -90,7 +97,7 @@ public class TransportFollowIndexActionTests extends ESTestCase {
         {
             // should fail, because leader index is closed
             IndexMetaData leaderIMD = createIMD("index1", State.CLOSE, "{}", 5, Settings.builder()
-                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap());
+                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
             IndexMetaData followIMD = createIMD("index2", State.OPEN, "{}", 5, Settings.builder()
                 .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), customMetaData);
             Exception e = expectThrows(IllegalArgumentException.class, () -> validate(request, leaderIMD, followIMD, UUIDs, null));
@@ -99,7 +106,7 @@ public class TransportFollowIndexActionTests extends ESTestCase {
         {
             // should fail, because index.xpack.ccr.following_index setting has not been enabled in leader index
             IndexMetaData leaderIMD = createIMD("index1", 1,
-                Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), customMetaData);
+                Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
             IndexMetaData
followIMD = createIMD("index2", 1, Settings.EMPTY, customMetaData); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); mapperService.updateMapping(null, followIMD); @@ -111,7 +118,7 @@ public class TransportFollowIndexActionTests extends ESTestCase { { // should fail, because leader has a field with the same name mapped as keyword and follower as text IndexMetaData leaderIMD = createIMD("index1", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"keyword\"}}}", 5, - Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap()); + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null); IndexMetaData followIMD = createIMD("index2", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"text\"}}}", 5, Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build(), customMetaData); MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); @@ -125,7 +132,7 @@ public class TransportFollowIndexActionTests extends ESTestCase { IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder() .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") .put("index.analysis.analyzer.my_analyzer.type", "custom") - .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace").build(), emptyMap()); + .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace").build(), null); IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) .put("index.analysis.analyzer.my_analyzer.type", "custom") @@ -136,7 +143,7 @@ public class TransportFollowIndexActionTests extends ESTestCase { { // should fail because the following index does not have the following_index settings IndexMetaData leaderIMD = createIMD("index1", 5, - Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap()); + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null); Settings followingIndexSettings = randomBoolean() ? 
Settings.EMPTY :
                 Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), false).build();
             IndexMetaData followIMD = createIMD("index2", 5, followingIndexSettings, customMetaData);
@@ -151,7 +158,7 @@ public class TransportFollowIndexActionTests extends ESTestCase {
         {
             // should succeed
             IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder()
-                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), emptyMap());
+                .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build(), null);
             IndexMetaData followIMD = createIMD("index2", 5, Settings.builder()
                 .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build(), customMetaData);
             MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2");
@@ -164,7 +171,7 @@ public class TransportFollowIndexActionTests extends ESTestCase {
             IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder()
                 .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")
                 .put("index.analysis.analyzer.my_analyzer.type", "custom")
-                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), emptyMap());
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), null);
             IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder()
                 .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
                 .put("index.analysis.analyzer.my_analyzer.type", "custom")
@@ -181,7 +188,7 @@ public class TransportFollowIndexActionTests extends ESTestCase {
                 .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")
                 .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s")
                 .put("index.analysis.analyzer.my_analyzer.type", "custom")
-                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), emptyMap());
+                .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build(), null);
             IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder()
                 .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
                 .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s")
@@ -207,15 +214,19 @@ public class TransportFollowIndexActionTests extends ESTestCase {
                                             int numberOfShards, Settings settings, Map<String, String> custom) throws IOException {
-        return IndexMetaData.builder(index)
+        IndexMetaData.Builder builder = IndexMetaData.builder(index)
             .settings(settings(Version.CURRENT).put(settings))
             .numberOfShards(numberOfShards)
             .state(state)
             .numberOfReplicas(0)
             .setRoutingNumShards(numberOfShards)
-            .putMapping("_doc", mapping)
-            .putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, custom)
-            .build();
+            .putMapping("_doc", mapping);
+
+        if (custom != null) {
+            builder.putCustom(Ccr.CCR_CUSTOM_METADATA_KEY, custom);
+        }
+
+        return builder.build();
     }
 }

From 7046cc467f63186fc5b9e7d388f4a9ceb25b8bed Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Mon, 17 Sep 2018 18:08:19 +0200
Subject: [PATCH 26/27] [CCR] Make index.xpack.ccr.following_index an internal setting (#33768)

---
 .../elasticsearch/xpack/ccr/CcrSettings.java  |  2 +-
 .../xpack/ccr/ShardChangesIT.java             | 20 +++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java
index 122f5a913d2..d3f5c85b4f8 100644
--- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java
@@ -27,7 +27,7 @@ public final class CcrSettings {
      * Index setting for a following index.
      */
     public static final Setting<Boolean> CCR_FOLLOWING_INDEX_SETTING =
-        Setting.boolSetting("index.xpack.ccr.following_index", false, Setting.Property.IndexScope);
+        Setting.boolSetting("index.xpack.ccr.following_index", false, Property.IndexScope, Property.InternalIndex);
 
     /**
      * Setting for controlling the interval in between polling leader clusters to check whether there are indices to follow
diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java
index 932000d766c..73737623a40 100644
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java
+++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java
@@ -9,6 +9,8 @@ package org.elasticsearch.xpack.ccr;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.action.bulk.BulkProcessor;
 import org.elasticsearch.action.bulk.BulkRequest;
@@ -477,6 +479,24 @@ public class ShardChangesIT extends ESIntegTestCase {
         assertThat(e.getMessage(), containsString("follow index [index2] should reference"));
     }
 
+    public void testAttemptToChangeCcrFollowingIndexSetting() throws Exception {
+        String leaderIndexSettings = getIndexSettings(1, 0, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
+        assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON).get());
+        ensureYellow("index1");
+        FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2");
+        CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest);
+        client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get();
+        unfollowIndex("index2");
+        client().admin().indices().close(new CloseIndexRequest("index2")).actionGet();
+
+        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest("index2");
+        updateSettingsRequest.settings(Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), false).build());
+        Exception e = expectThrows(IllegalArgumentException.class,
+            () -> client().admin().indices().updateSettings(updateSettingsRequest).actionGet());
+        assertThat(e.getMessage(), equalTo("can not update internal setting [index.xpack.ccr.following_index]; " +
+            "this setting is managed via a dedicated API"));
+    }
+
     private CheckedRunnable<Exception> assertTask(final int numberOfPrimaryShards, final Map<ShardId, Long> numDocsPerShard) {
         return () -> {
             final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();

From 48a5b45d28a0740231128c00a04586b5a2c1cb12 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Mon, 17 Sep 2018 18:18:58 +0200
Subject: [PATCH 27/27] Ensure fully deleted segments are accounted for correctly (#33757)

We can't rely on the leaf reader ordinal in a wrapped reader since it
might not correspond to the ordinal in the SegmentInfos for its
SegmentCommitInfo.

Relates to #32844
Closes #33689
Closes #33755
---
 .../snapshots/SourceOnlySnapshot.java         | 71 +++++++++++++++++--
 .../SourceOnlySnapshotRepository.java         |  2 +-
 .../snapshots/SourceOnlySnapshotIT.java       | 38 +++++-----
 .../SourceOnlySnapshotShardTests.java         |  1 -
 .../snapshots/SourceOnlySnapshotTests.java    | 54 ++++++++++++++
 5 files changed, 141 insertions(+), 25 deletions(-)

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java
index b7d6a51f45a..6c38a25f69a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshot.java
@@ -82,15 +82,25 @@ public class SourceOnlySnapshot {
         try (Lock writeLock = targetDirectory.obtainLock(IndexWriter.WRITE_LOCK_NAME);
              StandardDirectoryReader reader = (StandardDirectoryReader) DirectoryReader.open(commit)) {
             SegmentInfos segmentInfos = reader.getSegmentInfos();
-            DirectoryReader wrapper = wrapReader(reader);
             List<SegmentCommitInfo> newInfos = new ArrayList<>();
-            for (LeafReaderContext ctx : wrapper.leaves()) {
-                SegmentCommitInfo info = segmentInfos.info(ctx.ord);
+            for (LeafReaderContext ctx : reader.leaves()) {
                 LeafReader leafReader = ctx.reader();
-                LiveDocs liveDocs = getLiveDocs(leafReader);
-                if (leafReader.numDocs() != 0) { // fully deleted segments don't need to be processed
-                    SegmentCommitInfo newInfo = syncSegment(info, liveDocs, leafReader.getFieldInfos(), existingSegments, createdFiles);
-                    newInfos.add(newInfo);
+                SegmentCommitInfo info = reader.getSegmentInfos().info(ctx.ord);
+                assert info.info.equals(Lucene.segmentReader(ctx.reader()).getSegmentInfo().info);
+                /* We could do this totally differently without wrapping this dummy directory reader if FilterCodecReader had a
+                 * getDelegate method. This is fixed in LUCENE-8502 but we need to wait for it to come in 7.5.1 or 7.6.
+                 * The reason here is that the ctx.ord is not guaranteed to be equivalent to the SegmentCommitInfo ord in the SegmentInfo
+                 * object since we might drop fully deleted segments. If that happens we are using the wrong reader for the SI and
+                 * would almost certainly expose deleted documents.
+                 */
+                DirectoryReader wrappedReader = wrapReader(new DummyDirectoryReader(reader.directory(), leafReader));
+                if (wrappedReader.leaves().isEmpty() == false) {
+                    leafReader = wrappedReader.leaves().get(0).reader();
+                    LiveDocs liveDocs = getLiveDocs(leafReader);
+                    if (leafReader.numDocs() != 0) { // fully deleted segments don't need to be processed
+                        SegmentCommitInfo newInfo = syncSegment(info, liveDocs, leafReader.getFieldInfos(), existingSegments, createdFiles);
+                        newInfos.add(newInfo);
+                    }
                 }
             }
             segmentInfos.clear();
@@ -258,4 +268,51 @@ public class SourceOnlySnapshot {
             this.bits = bits;
         }
     }
+
+    private static class DummyDirectoryReader extends DirectoryReader {
+
+        protected DummyDirectoryReader(Directory directory, LeafReader...
segmentReaders) throws IOException { + super(directory, segmentReaders); + } + + @Override + protected DirectoryReader doOpenIfChanged() throws IOException { + return null; + } + + @Override + protected DirectoryReader doOpenIfChanged(IndexCommit commit) throws IOException { + return null; + } + + @Override + protected DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws IOException { + return null; + } + + @Override + public long getVersion() { + return 0; + } + + @Override + public boolean isCurrent() throws IOException { + return false; + } + + @Override + public IndexCommit getIndexCommit() throws IOException { + return null; + } + + @Override + protected void doClose() throws IOException { + + } + + @Override + public CacheHelper getReaderCacheHelper() { + return null; + } + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java index a75d5f488ee..704f4d90344 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/snapshots/SourceOnlySnapshotRepository.java @@ -126,7 +126,7 @@ public final class SourceOnlySnapshotRepository extends FilterRepository { SourceOnlySnapshot snapshot = new SourceOnlySnapshot(tempStore.directory(), querySupplier); snapshot.syncSnapshot(snapshotIndexCommit); // we will use the lucene doc ID as the seq ID so we set the local checkpoint to maxDoc with a new index UUID - SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + SegmentInfos segmentInfos = tempStore.readLastCommittedSegmentsInfo(); tempStore.bootstrapNewHistory(segmentInfos.totalMaxDoc()); store.incRef(); try (DirectoryReader reader = DirectoryReader.open(tempStore.directory())) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java index 6d3a17e3ebf..737e2e26970 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotIT.java @@ -49,7 +49,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutionException; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -97,7 +97,10 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase { boolean requireRouting = randomBoolean(); boolean useNested = randomBoolean(); IndexRequestBuilder[] builders = snashotAndRestore(sourceIdx, 1, true, requireRouting, useNested); - assertHits(sourceIdx, builders.length); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(sourceIdx).clear().setDocs(true).get(); + long deleted = indicesStatsResponse.getTotal().docs.getDeleted(); + boolean sourceHadDeletions = deleted > 0; // we use indexRandom which might create holes ie. 
deleted docs
+        assertHits(sourceIdx, builders.length, sourceHadDeletions);
         assertMappings(sourceIdx, requireRouting, useNested);
         SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> {
             client().prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery()
@@ -116,7 +119,7 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
         client().admin().indices().prepareUpdateSettings(sourceIdx)
             .setSettings(Settings.builder().put("index.number_of_replicas", 1)).get();
         ensureGreen(sourceIdx);
-        assertHits(sourceIdx, builders.length);
+        assertHits(sourceIdx, builders.length, sourceHadDeletions);
     }
 
     public void testSnapshotAndRestoreWithNested() throws Exception {
@@ -125,7 +128,7 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
         IndexRequestBuilder[] builders = snashotAndRestore(sourceIdx, 1, true, requireRouting, true);
         IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().clear().setDocs(true).get();
         assertThat(indicesStatsResponse.getTotal().docs.getDeleted(), Matchers.greaterThan(0L));
-        assertHits(sourceIdx, builders.length);
+        assertHits(sourceIdx, builders.length, true);
         assertMappings(sourceIdx, requireRouting, true);
         SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () ->
             client().prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery().addIds("" + randomIntBetween(0, builders.length))).get());
@@ -141,7 +144,7 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
         client().admin().indices().prepareUpdateSettings(sourceIdx).setSettings(Settings.builder().put("index.number_of_replicas", 1))
             .get();
         ensureGreen(sourceIdx);
-        assertHits(sourceIdx, builders.length);
+        assertHits(sourceIdx, builders.length, true);
     }
 
     private void assertMappings(String sourceIdx, boolean requireRouting, boolean useNested) throws IOException {
@@ -165,15 +168,12 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase {
         }
     }
 
-    private void assertHits(String index, int numDocsExpected) {
+    private void assertHits(String index, int numDocsExpected, boolean sourceHadDeletions) {
         SearchResponse searchResponse = client().prepareSearch(index)
             .addSort(SeqNoFieldMapper.NAME, SortOrder.ASC)
             .setSize(numDocsExpected).get();
-        Consumer<SearchResponse> assertConsumer = res -> {
+        BiConsumer<SearchResponse, Boolean> assertConsumer = (res, allowHoles) -> {
             SearchHits hits = res.getHits();
-            IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats().clear().setDocs(true).get();
-            long deleted = indicesStatsResponse.getTotal().docs.getDeleted();
-            boolean allowHoles = deleted > 0; // we use indexRandom which might create holes ie.
deleted docs long i = 0; for (SearchHit hit : hits) { String id = hit.getId(); @@ -190,18 +190,24 @@ public class SourceOnlySnapshotIT extends ESIntegTestCase { assertEquals("r" + id, hit.field("_routing").getValue()); } }; - assertConsumer.accept(searchResponse); + assertConsumer.accept(searchResponse, sourceHadDeletions); assertEquals(numDocsExpected, searchResponse.getHits().totalHits); searchResponse = client().prepareSearch(index) .addSort(SeqNoFieldMapper.NAME, SortOrder.ASC) .setScroll("1m") .slice(new SliceBuilder(SeqNoFieldMapper.NAME, randomIntBetween(0,1), 2)) .setSize(randomIntBetween(1, 10)).get(); - do { - // now do a scroll with a slice - assertConsumer.accept(searchResponse); - searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); - } while (searchResponse.getHits().getHits().length > 0); + try { + do { + // now do a scroll with a slice + assertConsumer.accept(searchResponse, true); + searchResponse = client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueMinutes(1)).get(); + } while (searchResponse.getHits().getHits().length > 0); + } finally { + if (searchResponse.getScrollId() != null) { + client().prepareClearScroll().addScrollId(searchResponse.getScrollId()).get(); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java index 261133b8907..7058724ecf0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotShardTests.java @@ -162,7 +162,6 @@ public class SourceOnlySnapshotShardTests extends IndexShardTestCase { return "{ \"value\" : \"" + randomAlphaOfLength(10) + "\"}"; } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33689") public void testRestoreMinmal() throws IOException { IndexShard shard = newStartedShard(true); int numInitialDocs = randomIntBetween(10, 100); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java index e7d731739de..f3b3aed0bf3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/snapshots/SourceOnlySnapshotTests.java @@ -12,10 +12,12 @@ import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; +import org.apache.lucene.index.CodecReader; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.FilterMergePolicy; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; @@ -34,6 +36,7 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.IOSupplier; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.test.ESTestCase; @@ -242,4 
+245,55 @@ public class SourceOnlySnapshotTests extends ESTestCase {
             reader.close();
         }
     }
+
+    public void testFullyDeletedSegments() throws IOException {
+        try (Directory dir = newDirectory()) {
+            SnapshotDeletionPolicy deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
+            IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()
+                .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
+                .setIndexDeletionPolicy(deletionPolicy).setMergePolicy(new FilterMergePolicy(NoMergePolicy.INSTANCE) {
+                    @Override
+                    public boolean useCompoundFile(SegmentInfos infos, SegmentCommitInfo mergedInfo, MergeContext mergeContext) {
+                        return randomBoolean();
+                    }
+
+                    @Override
+                    public boolean keepFullyDeletedSegment(IOSupplier<CodecReader> readerIOSupplier) throws IOException {
+                        return true;
+                    }
+                }));
+            Document doc = new Document();
+            doc.add(new StringField("id", "1", Field.Store.YES));
+            doc.add(new TextField("text", "the quick brown fox", Field.Store.NO));
+            doc.add(new NumericDocValuesField("rank", 1));
+            doc.add(new StoredField("rank", 1));
+            doc.add(new StoredField("src", "the quick brown fox"));
+            writer.addDocument(doc);
+            writer.commit();
+            doc = new Document();
+            doc.add(new StringField("id", "1", Field.Store.YES));
+            doc.add(new TextField("text", "the quick brown fox", Field.Store.NO));
+            doc.add(new NumericDocValuesField("rank", 3));
+            doc.add(new StoredField("rank", 3));
+            doc.add(new StoredField("src", "the quick brown fox"));
+            writer.softUpdateDocument(new Term("id", "1"), doc, new NumericDocValuesField(Lucene.SOFT_DELETES_FIELD, 1));
+            writer.commit();
+            try (Directory targetDir = newDirectory()) {
+                IndexCommit snapshot = deletionPolicy.snapshot();
+                SourceOnlySnapshot snapshoter = new SourceOnlySnapshot(targetDir);
+                snapshoter.syncSnapshot(snapshot);
+
+                try (DirectoryReader snapReader = DirectoryReader.open(targetDir)) {
+                    assertEquals(snapReader.maxDoc(), 1);
+                    assertEquals(snapReader.numDocs(), 1);
+                    assertEquals("3", snapReader.document(0).getField("rank").stringValue());
+                }
+                try (IndexReader writerReader = DirectoryReader.open(writer)) {
+                    assertEquals(writerReader.maxDoc(), 2);
+                    assertEquals(writerReader.numDocs(), 1);
+                }
+            }
+            writer.close();
+        }
+    }
 }
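
As a closing illustration of the log-message change in PATCH 24, here is a minimal, hypothetical sketch. It is not part of any patch above; the class name and sample value are invented, and it assumes only the org.elasticsearch.common.unit.ByteSizeValue class that testToString exercises. ByteSizeValue#toString renders a byte count in the largest whole unit, which is what the reworded DiskThresholdDecider messages print instead of a raw long:

import org.elasticsearch.common.unit.ByteSizeValue;

// Hypothetical sketch: contrasts the raw byte count the old messages logged
// with the human-readable ByteSizeValue rendering used after the patch.
public class FreeBytesRenderingSketch {
    public static void main(String[] args) {
        long freeBytes = 5L * 1024 * 1024 * 1024; // stand-in for a node's free disk space (5 GiB)
        System.out.println(freeBytes + " bytes free");              // before: "5368709120 bytes free"
        System.out.println(new ByteSizeValue(freeBytes) + " free"); // after: "5gb free"
    }
}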