diff --git a/build.gradle b/build.gradle index f8282ca5ae8..ad92a379b66 100644 --- a/build.gradle +++ b/build.gradle @@ -20,16 +20,12 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.BuildPlugin -import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionCollection import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.gradle.plugins.ide.eclipse.model.SourceFolder -import java.nio.file.Files -import java.nio.file.Path - plugins { id 'com.gradle.build-scan' version '1.13.2' } @@ -576,62 +572,6 @@ wrapper { } } -static void assertLinesInFile(final Path path, final List<String> expectedLines) { - final List<String> actualLines = Files.readAllLines(path) - int line = 0 - for (final String expectedLine : expectedLines) { - final String actualLine = actualLines.get(line) - if (expectedLine != actualLine) { - throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]") - } - line++ - } -} - -/* - * Check that all generated JARs have our NOTICE.txt and an appropriate - * LICENSE.txt in them. We configure this in Gradle but we'd like to - * be extra paranoid. - */ -subprojects { project -> - project.tasks.withType(Jar).whenTaskAdded { jarTask -> - final Task extract = project.task("extract${jarTask.name.capitalize()}", type: LoggedExec) { - dependsOn jarTask - ext.destination = project.buildDir.toPath().resolve("jar-extracted/${jarTask.name}") - commandLine "${->new File(rootProject.compilerJavaHome, 'bin/jar')}", - 'xf', "${-> jarTask.outputs.files.singleFile}", 'META-INF/LICENSE.txt', 'META-INF/NOTICE.txt' - workingDir destination - onlyIf {jarTask.enabled} - doFirst { - project.delete(destination) - Files.createDirectories(destination) - } - } - - final Task checkNotice = project.task("verify${jarTask.name.capitalize()}Notice") { - dependsOn extract - onlyIf {jarTask.enabled} - doLast { - final List<String> noticeLines = Files.readAllLines(project.noticeFile.toPath()) - final Path noticePath = extract.destination.resolve('META-INF/NOTICE.txt') - assertLinesInFile(noticePath, noticeLines) - } - } - project.check.dependsOn checkNotice - - final Task checkLicense = project.task("verify${jarTask.name.capitalize()}License") { - dependsOn extract - onlyIf {jarTask.enabled} - doLast { - final List<String> licenseLines = Files.readAllLines(project.licenseFile.toPath()) - final Path licensePath = extract.destination.resolve('META-INF/LICENSE.txt') - assertLinesInFile(licensePath, licenseLines) - } - } - project.check.dependsOn checkLicense - } -} - /* Remove assemble/dependenciesInfo on all qa projects because we don't need to publish * artifacts for them.
*/ gradle.projectsEvaluated { diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index dce14b10fcb..da8ad788164 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -24,15 +24,6 @@ plugins { id 'groovy' } -gradlePlugin { - plugins { - simplePlugin { - id = 'elasticsearch.clusterformation' - implementationClass = 'org.elasticsearch.gradle.clusterformation.ClusterformationPlugin' - } - } -} - group = 'org.elasticsearch.gradle' String minimumGradleVersion = file('src/main/resources/minimumGradleVersion').text.trim() diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 75b5676cc34..1e3446f3ccd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -56,6 +56,7 @@ import org.gradle.util.GradleVersion import java.nio.charset.StandardCharsets import java.time.ZoneOffset import java.time.ZonedDateTime + /** * Encapsulates build configuration for elasticsearch projects. */ @@ -739,6 +740,7 @@ class BuildPlugin implements Plugin { } from(project.noticeFile.parent) { include project.noticeFile.name + rename { 'NOTICE.txt' } } } } diff --git a/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties new file mode 100644 index 00000000000..dfd6cd9956a --- /dev/null +++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.clusterformation.properties @@ -0,0 +1 @@ +implementation-class=org.elasticsearch.gradle.clusterformation.ClusterformationPlugin diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java new file mode 100644 index 00000000000..dd0dbb25208 --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildPluginIT.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.gradle; + +import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.test.GradleIntegrationTestCase; +import org.gradle.testkit.runner.BuildResult; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; + +public class BuildPluginIT extends GradleIntegrationTestCase { + + public void testPluginCanBeApplied() { + BuildResult result = getGradleRunner("elasticsearch.build") + .withArguments("hello", "-s") + .build(); + assertTaskSuccessful(result, ":hello"); + assertOutputContains(result.getOutput(), "build plugin can be applied"); + } + + public void testCheckTask() { + BuildResult result = getGradleRunner("elasticsearch.build") + .withArguments("check", "assemble", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) + .build(); + assertTaskSuccessful(result, ":check"); + } + + public void testLicenseAndNotice() throws IOException { + BuildResult result = getGradleRunner("elasticsearch.build") + .withArguments("clean", "assemble", "-s", "-Dlocal.repo.path=" + getLocalTestRepoPath()) + .build(); + + assertTaskSuccessful(result, ":assemble"); + + assertBuildFileExists(result, "elasticsearch.build", "distributions/elasticsearch.build.jar"); + + try (ZipFile zipFile = new ZipFile(new File( + getBuildDir("elasticsearch.build"), "distributions/elasticsearch.build.jar" + ))) { + ZipEntry licenseEntry = zipFile.getEntry("META-INF/LICENSE.txt"); + ZipEntry noticeEntry = zipFile.getEntry("META-INF/NOTICE.txt"); + assertNotNull("Jar does not have META-INF/LICENSE.txt", licenseEntry); + assertNotNull("Jar does not have META-INF/NOTICE.txt", noticeEntry); + try ( + InputStream license = zipFile.getInputStream(licenseEntry); + InputStream notice = zipFile.getInputStream(noticeEntry) + ) { + assertEquals("this is a test license file", IOUtils.toString(license, StandardCharsets.UTF_8.name())); + assertEquals("this is a test notice file", IOUtils.toString(notice, StandardCharsets.UTF_8.name())); + } + } + } + + +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java index 7e469e8597d..745c63cd4dc 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/precommit/NamingConventionsTaskIT.java @@ -2,74 +2,56 @@ package org.elasticsearch.gradle.precommit; import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.BuildResult; -import org.gradle.testkit.runner.GradleRunner; -import org.gradle.testkit.runner.TaskOutcome; import java.util.Arrays; +import java.util.HashSet; public class NamingConventionsTaskIT extends GradleIntegrationTestCase { - public void testPluginCanBeApplied() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("namingConventionsSelfTest")) - .withArguments("hello", "-s", "-PcheckForTestsInMain=false") - .withPluginClasspath() - .build(); - - assertEquals(TaskOutcome.SUCCESS, result.task(":hello").getOutcome()); - String output = result.getOutput(); - assertTrue(output, output.contains("build plugin can be applied")); - } - public void testNameCheckFailsAsItShould() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("namingConventionsSelfTest")) + BuildResult result = getGradleRunner("namingConventionsSelfTest")
.withArguments("namingConventions", "-s", "-PcheckForTestsInMain=false") - .withPluginClasspath() .buildAndFail(); - assertNotNull("task did not run", result.task(":namingConventions")); - assertEquals(TaskOutcome.FAILED, result.task(":namingConventions").getOutcome()); - String output = result.getOutput(); - for (String line : Arrays.asList( - "Found inner classes that are tests, which are excluded from the test runner:", - "* org.elasticsearch.test.NamingConventionsCheckInMainIT$InternalInvalidTests", - "Classes ending with [Tests] must subclass [UnitTestCase]:", - "* org.elasticsearch.test.NamingConventionsCheckInMainTests", - "* org.elasticsearch.test.NamingConventionsCheckInMainIT", - "Not all subclasses of UnitTestCase match the naming convention. Concrete classes must end with [Tests]:", - "* org.elasticsearch.test.WrongName")) { - assertTrue( - "expected: '" + line + "' but it was not found in the output:\n" + output, - output.contains(line) - ); - } + assertTaskFailed(result, ":namingConventions"); + assertOutputContains( + result.getOutput(), + // TODO: java9 Set.of + new HashSet<>( + Arrays.asList( + "Not all subclasses of UnitTestCase match the naming convention. Concrete classes must end with [Tests]:", + "* org.elasticsearch.test.WrongName", + "Found inner classes that are tests, which are excluded from the test runner:", + "* org.elasticsearch.test.NamingConventionsCheckInMainIT$InternalInvalidTests", + "Classes ending with [Tests] must subclass [UnitTestCase]:", + "* org.elasticsearch.test.NamingConventionsCheckInMainTests", + "* org.elasticsearch.test.NamingConventionsCheckInMainIT" + ) + ) + ); } public void testNameCheckFailsAsItShouldWithMain() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("namingConventionsSelfTest")) + BuildResult result = getGradleRunner("namingConventionsSelfTest") .withArguments("namingConventions", "-s", "-PcheckForTestsInMain=true") - .withPluginClasspath() .buildAndFail(); - assertNotNull("task did not run", result.task(":namingConventions")); - assertEquals(TaskOutcome.FAILED, result.task(":namingConventions").getOutcome()); - - String output = result.getOutput(); - for (String line : Arrays.asList( - "Classes ending with [Tests] or [IT] or extending [UnitTestCase] must be in src/test/java:", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyAbstractTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$InnerTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$NotImplementingTests", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongNameTheSecond", - "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongName")) { - assertTrue( - "expected: '" + line + "' but it was not found in the output:\n"+output, - output.contains(line) - ); - } + assertTaskFailed(result, ":namingConventions"); + assertOutputContains( + result.getOutput(), + // TODO: java9 Set.of + new HashSet<>( + Arrays.asList( + "Classes ending with [Tests] or [IT] or extending [UnitTestCase] must be in src/test/java:", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyInterfaceTests", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$DummyAbstractTests", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$InnerTests", + "*
org.elasticsearch.test.NamingConventionsCheckBadClasses$NotImplementingTests", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongNameTheSecond", + "* org.elasticsearch.test.NamingConventionsCheckBadClasses$WrongName" + ) + ) + ); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index a1d4b86ab76..f8e3cf88c40 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -10,6 +10,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -47,6 +48,12 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { } } + protected void assertOutputContains(String output, Set<String> lines) { + for (String line : lines) { + assertOutputContains(output, line); + } + } + protected void assertOutputContains(String output, String line) { assertTrue( "Expected the following line in output:\n\n" + line + "\n\nOutput is:\n" + output, @@ -82,7 +89,7 @@ public abstract class GradleIntegrationTestCase extends GradleUnitTestCase { "\n\nOutput is:\n" + result.getOutput()); } assertEquals( - "Expected task to be successful but it was: " + task.getOutcome() + + "Expected task `" + taskName + "` to be successful but it was: " + task.getOutcome() + taskOutcome + "\n\nOutput is:\n" + result.getOutput() , taskOutcome, task.getOutcome() diff --git a/buildSrc/src/testKit/elasticsearch.build/LICENSE b/buildSrc/src/testKit/elasticsearch.build/LICENSE new file mode 100644 index 00000000000..cf6ea07b188 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/LICENSE @@ -0,0 +1 @@ +this is a test license file \ No newline at end of file diff --git a/buildSrc/src/testKit/elasticsearch.build/NOTICE b/buildSrc/src/testKit/elasticsearch.build/NOTICE new file mode 100644 index 00000000000..0c070fe7424 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/NOTICE @@ -0,0 +1 @@ +this is a test notice file \ No newline at end of file diff --git a/buildSrc/src/testKit/elasticsearch.build/build.gradle b/buildSrc/src/testKit/elasticsearch.build/build.gradle new file mode 100644 index 00000000000..2a9e8fa3ec9 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/build.gradle @@ -0,0 +1,34 @@ +plugins { + id 'java' + id 'elasticsearch.build' +} + +ext.licenseFile = file("LICENSE") +ext.noticeFile = file("NOTICE") + +dependencies { + compile "junit:junit:${versions.junit}" + // missing classes in thirdparty audit + compile 'org.hamcrest:hamcrest-core:1.3' +} + +repositories { + mavenCentral() + maven { + url System.getProperty("local.repo.path") + } +} + +// todo remove offending rules +forbiddenApisMain.enabled = false +forbiddenApisTest.enabled = false +// requires dependency on testing fw +jarHell.enabled = false +// we don't have tests for now +test.enabled = false + +task hello { + doFirst { + println "build plugin can be applied" + } +} diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-1.3.jar.sha1 b/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-1.3.jar.sha1 new file mode 100644 index 00000000000..1085ece454c --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-1.3.jar.sha1 @@ -0,0 +1 @@
+42a25dc3219429f0e5d060061f71acb49bf010a0 \ No newline at end of file diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-LICENSE.txt b/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-LICENSE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-NOTICE.txt b/buildSrc/src/testKit/elasticsearch.build/licenses/hamcrest-core-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/junit-4.12.jar.sha1 b/buildSrc/src/testKit/elasticsearch.build/licenses/junit-4.12.jar.sha1 new file mode 100644 index 00000000000..94d69f8b715 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/licenses/junit-4.12.jar.sha1 @@ -0,0 +1 @@ +2973d150c0dc1fefe998f834810d68f278ea58ec \ No newline at end of file diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/junit-LICENSE.txt b/buildSrc/src/testKit/elasticsearch.build/licenses/junit-LICENSE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/buildSrc/src/testKit/elasticsearch.build/licenses/junit-NOTICE.txt b/buildSrc/src/testKit/elasticsearch.build/licenses/junit-NOTICE.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/buildSrc/src/testKit/elasticsearch.build/src/main/java/org/elasticsearch/SampleClass.java b/buildSrc/src/testKit/elasticsearch.build/src/main/java/org/elasticsearch/SampleClass.java new file mode 100644 index 00000000000..defed880495 --- /dev/null +++ b/buildSrc/src/testKit/elasticsearch.build/src/main/java/org/elasticsearch/SampleClass.java @@ -0,0 +1,26 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch; + +/** + * This is just a test class + */ +public class SampleClass { + +} diff --git a/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle b/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle index 47e0e94b86a..b1c56ddc804 100644 --- a/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle +++ b/buildSrc/src/testKit/namingConventionsSelfTest/build.gradle @@ -13,14 +13,8 @@ thirdPartyAudit.enabled = false ext.licenseFile = file("$buildDir/dummy/license") ext.noticeFile = file("$buildDir/dummy/notice") -task hello { - doFirst { - println "build plugin can be applied" - } -} - dependencies { - compile "junit:junit:${versions.junit}" + compile "junit:junit:4.12" } namingConventions { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 30e79d1dce2..8a04c229de2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -28,10 +28,12 @@ import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.DeleteJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetJobRequest; +import org.elasticsearch.client.ml.GetJobStatsRequest; import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.client.ml.FlushJobRequest; import java.io.IOException; @@ -126,6 +128,36 @@ final class MLRequestConverters { return request; } + static Request flushJob(FlushJobRequest flushJobRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(flushJobRequest.getJobId()) + .addPathPartAsIs("_flush") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(flushJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request getJobStats(GetJobStatsRequest getJobStatsRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(Strings.collectionToCommaDelimitedString(getJobStatsRequest.getJobIds())) + .addPathPartAsIs("_stats") + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + + RequestConverters.Params params = new RequestConverters.Params(request); + if (getJobStatsRequest.isAllowNoJobs() != null) { + params.putParam("allow_no_jobs", Boolean.toString(getJobStatsRequest.isAllowNoJobs())); + } + return request; + } + static Request getRecords(GetRecordsRequest getRecordsRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index a972f760d2f..ac44f16b80b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -19,6 +19,11 @@ package org.elasticsearch.client; import 
org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.job.stats.JobStats; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; import org.elasticsearch.client.ml.DeleteJobRequest; @@ -288,6 +293,101 @@ public final class MachineLearningClient { Collections.emptySet()); } + /** + * Flushes internally buffered data for the given Machine Learning Job ensuring all data sent to the job has been processed. + * This may cause new results to be calculated depending on the contents of the buffer. + * + * Both flush and close operations are similar, + * however the flush is more efficient if you are expecting to send more data for analysis. + * + * When flushing, the job remains open and is available to continue analyzing data. + * A close operation additionally prunes and persists the model state to disk and the + * job must be opened again before analyzing further data. + * + * <p>
+ * For additional info + * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html">Flush ML job documentation</a> + * + * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public FlushJobResponse flushJob(FlushJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::flushJob, + options, + FlushJobResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Flushes internally buffered data for the given Machine Learning Job asynchronously, ensuring all data sent to the job has been processed. + * This may cause new results to be calculated depending on the contents of the buffer. + * + * Both flush and close operations are similar, + * however the flush is more efficient if you are expecting to send more data for analysis. + * + * When flushing, the job remains open and is available to continue analyzing data. + * A close operation additionally prunes and persists the model state to disk and the + * job must be opened again before analyzing further data. + * + * <p>
+ * For additional info + * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html">Flush ML job documentation</a> + * + * @param request The {@link FlushJobRequest} object enclosing the `jobId` and additional request options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void flushJobAsync(FlushJobRequest request, RequestOptions options, ActionListener<FlushJobResponse> listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::flushJob, + options, + FlushJobResponse::fromXContent, + listener, + Collections.emptySet()); + }
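The synchronous and asynchronous flush methods above both hit the `_flush` endpoint via MLRequestConverters::flushJob. A minimal usage sketch (the job id and the pre-built `RestHighLevelClient` called `client` are illustrative assumptions, not part of this diff):

```java
// Flush an opened job so that everything sent so far is processed,
// additionally computing interim results for the most recent buckets.
FlushJobRequest flushRequest = new FlushJobRequest("my-job"); // hypothetical job id
flushRequest.setCalcInterim(true);

FlushJobResponse flushResponse = client.machineLearning().flushJob(flushRequest, RequestOptions.DEFAULT);
if (flushResponse.isFlushed()) {
    // may be null until at least one bucket has been finalized
    System.out.println("last finalized bucket end: " + flushResponse.getLastFinalizedBucketEnd());
}
```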
+ + /** + * Gets usage statistics for one or more Machine Learning jobs + * + * <p> + * For additional info + * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html">Get Job stats docs</a> + * </p>
+ * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return {@link GetJobStatsResponse} response object containing + * the {@link JobStats} objects and the number of jobs found + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetJobStatsResponse getJobStats(GetJobStatsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getJobStats, + options, + GetJobStatsResponse::fromXContent, + Collections.emptySet()); + }
+ + /** + * Gets usage statistics for one or more Machine Learning jobs, asynchronously. + * + * <p> + * For additional info + * see <a href="http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-job-stats.html">Get Job stats docs</a> + * </p>
+ * @param request {@link GetJobStatsRequest} Request containing a list of jobId(s) and additional options + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified with {@link GetJobStatsResponse} upon request completion + */ + public void getJobStatsAsync(GetJobStatsRequest request, RequestOptions options, ActionListener<GetJobStatsResponse> listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getJobStats, + options, + GetJobStatsResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Gets the records for a Machine Learning Job. * <p>
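Putting the new MachineLearningClient methods together, fetching statistics could look like the sketch below (again assuming a pre-built `client`; the `getJobId()`/`getState()` accessors are assumptions, since the body of `JobStats` is truncated at the end of this diff):

```java
// Ask for stats on every job; allow_no_jobs keeps a non-matching wildcard from failing the call.
GetJobStatsRequest statsRequest = GetJobStatsRequest.getAllJobStatsRequest(); // equivalent to new GetJobStatsRequest("_all")
statsRequest.setAllowNoJobs(true);

GetJobStatsResponse statsResponse = client.machineLearning().getJobStats(statsRequest, RequestOptions.DEFAULT);
for (JobStats stats : statsResponse.jobStats()) {
    System.out.println(stats.getJobId() + " is " + stats.getState().value());
}
```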
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index e68bd266843..31832c8d9ea 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -109,7 +109,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; +import org.elasticsearch.index.reindex.AbstractBulkByScrollRequest; import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; @@ -843,6 +845,33 @@ final class RequestConverters { return request; } + static Request updateByQuery(UpdateByQueryRequest updateByQueryRequest) throws IOException { + String endpoint = + endpoint(updateByQueryRequest.indices(), updateByQueryRequest.getDocTypes(), "_update_by_query"); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + Params params = new Params(request) + .withRouting(updateByQueryRequest.getRouting()) + .withPipeline(updateByQueryRequest.getPipeline()) + .withRefresh(updateByQueryRequest.isRefresh()) + .withTimeout(updateByQueryRequest.getTimeout()) + .withWaitForActiveShards(updateByQueryRequest.getWaitForActiveShards()) + .withIndicesOptions(updateByQueryRequest.indicesOptions()); + if (updateByQueryRequest.isAbortOnVersionConflict() == false) { + params.putParam("conflicts", "proceed"); + } + if (updateByQueryRequest.getBatchSize() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_SIZE) { + params.putParam("scroll_size", Integer.toString(updateByQueryRequest.getBatchSize())); + } + if (updateByQueryRequest.getScrollTime() != AbstractBulkByScrollRequest.DEFAULT_SCROLL_TIMEOUT) { + params.putParam("scroll", updateByQueryRequest.getScrollTime()); + } + if (updateByQueryRequest.getSize() > 0) { + params.putParam("size", Integer.toString(updateByQueryRequest.getSize())); + } + request.setEntity(createEntity(updateByQueryRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request rollover(RolloverRequest rolloverRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover") .addPathPart(rolloverRequest.getNewIndexName()).build(); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 4cafdb37b69..ff5b0aae9da 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -66,6 +66,7 @@ import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.index.rankeval.RankEvalResponse; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.rest.BytesRestResponse; import 
org.elasticsearch.rest.RestStatus; @@ -436,6 +437,36 @@ public class RestHighLevelClient implements Closeable { ); } + /** + * Executes an update by query request. + * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html"> + * Update By Query API on elastic.co</a> + * @param updateByQueryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, emptySet() + ); + } + + /** + * Asynchronously executes an update by query request. + * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update-by-query.html"> + * Update By Query API on elastic.co</a> + * @param updateByQueryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, RequestOptions options, + ActionListener<BulkByScrollResponse> listener) { + performRequestAsyncAndParseEntity( + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet() + ); + } + /** * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
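For orientation, a sketch of driving the new update-by-query support (the index name is a placeholder, and the convenience constructor follows the high-level client docs of this era; treat it as an assumption):

```java
// Update the documents matched in "source-index" in place, proceeding past version conflicts.
UpdateByQueryRequest updateRequest = new UpdateByQueryRequest("source-index"); // hypothetical index
updateRequest.setAbortOnVersionConflict(false); // surfaces as conflicts=proceed (see the converter above)
updateRequest.setRefresh(true);                 // refresh affected shards once the request finishes

BulkByScrollResponse response = client.updateByQuery(updateRequest, RequestOptions.DEFAULT);
System.out.println("updated " + response.getUpdated() + " documents");
```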
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java new file mode 100644 index 00000000000..067851d4526 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobRequest.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request object to flush a given Machine Learning job. + */ +public class FlushJobRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField CALC_INTERIM = new ParseField("calc_interim"); + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + public static final ParseField ADVANCE_TIME = new ParseField("advance_time"); + public static final ParseField SKIP_TIME = new ParseField("skip_time"); + + public static final ConstructingObjectParser<FlushJobRequest, Void> PARSER = + new ConstructingObjectParser<>("flush_job_request", (a) -> new FlushJobRequest((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareBoolean(FlushJobRequest::setCalcInterim, CALC_INTERIM); + PARSER.declareString(FlushJobRequest::setStart, START); + PARSER.declareString(FlushJobRequest::setEnd, END); + PARSER.declareString(FlushJobRequest::setAdvanceTime, ADVANCE_TIME); + PARSER.declareString(FlushJobRequest::setSkipTime, SKIP_TIME); + } + + private final String jobId; + private Boolean calcInterim; + private String start; + private String end; + private String advanceTime; + private String skipTime; + + /** + * Create new Flush job request + * + * @param jobId The job ID of the job to flush + */ + public FlushJobRequest(String jobId) { + this.jobId = jobId; + } + + public String getJobId() { + return jobId; + } + + public Boolean getCalcInterim() { + return calcInterim; + } + + /** + * When {@code true} calculates the interim results for the most recent bucket or all buckets within the latency period. + * + * @param calcInterim defaults to {@code false}. + */ + public void setCalcInterim(boolean calcInterim) { + this.calcInterim = calcInterim; + } + + public String getStart() { + return start; + } + + /** + * When used in conjunction with {@link FlushJobRequest#calcInterim}, + * specifies the start of the range of buckets on which to calculate interim results. + * + * @param start the beginning of the range of buckets; may be an epoch seconds, epoch millis or an ISO string + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * When used in conjunction with {@link FlushJobRequest#calcInterim}, specifies the end of the range + * of buckets on which to calculate interim results + * + * @param end the end of the range of buckets; may be an epoch seconds, epoch millis or an ISO string + */ + public void setEnd(String end) { + this.end = end; + } + + public String getAdvanceTime() { + return advanceTime; + } + + /** + * Specifies to advance to a particular time value. + * Results are generated and the model is updated for data from the specified time interval. + * + * @param advanceTime String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setAdvanceTime(String advanceTime) { + this.advanceTime = advanceTime; + } + + public String getSkipTime() { + return skipTime; + } + + /** + * Specifies to skip to a particular time value. + * Results are not generated and the model is not updated for data from the specified time interval.
+ * + * @param skipTime String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setSkipTime(String skipTime) { + this.skipTime = skipTime; + } + + @Override + public int hashCode() { + return Objects.hash(jobId, calcInterim, start, end, advanceTime, skipTime); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + FlushJobRequest other = (FlushJobRequest) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(calcInterim, other.calcInterim) && + Objects.equals(start, other.start) && + Objects.equals(end, other.end) && + Objects.equals(advanceTime, other.advanceTime) && + Objects.equals(skipTime, other.skipTime); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + if (calcInterim != null) { + builder.field(CALC_INTERIM.getPreferredName(), calcInterim); + } + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (advanceTime != null) { + builder.field(ADVANCE_TIME.getPreferredName(), advanceTime); + } + if (skipTime != null) { + builder.field(SKIP_TIME.getPreferredName(), skipTime); + } + builder.endObject(); + return builder; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +}
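Since FlushJobRequest is mostly optional knobs, here is a short sketch of the windowed flush that the setters above describe (job id and timestamps are illustrative):

```java
// Calculate interim results only for a bounded range of buckets, then advance the model time.
FlushJobRequest request = new FlushJobRequest("my-job");
request.setCalcInterim(true);
request.setStart("1541452800000");       // epoch millis; epoch seconds or ISO strings also work
request.setEnd("1541539200000");
request.setAdvanceTime("1541539200000"); // results are generated and the model updated up to here
```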
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java new file mode 100644 index 00000000000..048b07b504a --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/FlushJobResponse.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Date; +import java.util.Objects; + +/** + * Response object containing flush acknowledgement and additional data + */ +public class FlushJobResponse extends ActionResponse implements ToXContentObject { + + public static final ParseField FLUSHED = new ParseField("flushed"); + public static final ParseField LAST_FINALIZED_BUCKET_END = new ParseField("last_finalized_bucket_end"); + + public static final ConstructingObjectParser<FlushJobResponse, Void> PARSER = + new ConstructingObjectParser<>("flush_job_response", + true, + (a) -> { + boolean flushed = (boolean) a[0]; + Date date = a[1] == null ? null : new Date((long) a[1]); + return new FlushJobResponse(flushed, date); + }); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), FLUSHED); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LAST_FINALIZED_BUCKET_END); + } + + public static FlushJobResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final boolean flushed; + private final Date lastFinalizedBucketEnd; + + public FlushJobResponse(boolean flushed, @Nullable Date lastFinalizedBucketEnd) { + this.flushed = flushed; + this.lastFinalizedBucketEnd = lastFinalizedBucketEnd; + } + + /** + * Was the job successfully flushed or not + */ + public boolean isFlushed() { + return flushed; + } + + /** + * Provides the timestamp (in milliseconds-since-the-epoch) of the end of the last bucket that was processed. + */ + @Nullable + public Date getLastFinalizedBucketEnd() { + return lastFinalizedBucketEnd; + } + + @Override + public int hashCode() { + return Objects.hash(flushed, lastFinalizedBucketEnd); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + FlushJobResponse that = (FlushJobResponse) other; + return that.flushed == flushed && Objects.equals(lastFinalizedBucketEnd, that.lastFinalizedBucketEnd); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FLUSHED.getPreferredName(), flushed); + if (lastFinalizedBucketEnd != null) { + builder.timeField(LAST_FINALIZED_BUCKET_END.getPreferredName(), + LAST_FINALIZED_BUCKET_END.getPreferredName() + "_string", lastFinalizedBucketEnd.getTime()); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java new file mode 100644 index 00000000000..d8eb350755d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsRequest.java @@ -0,0 +1,146 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + + +/** + * Request object to get {@link org.elasticsearch.client.ml.job.stats.JobStats} by their respective jobIds + * + * `_all` explicitly gets all the jobs' statistics in the cluster. + * An empty request (no `jobId`s) implicitly gets all the jobs' statistics in the cluster. + */ +public class GetJobStatsRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField ALLOW_NO_JOBS = new ParseField("allow_no_jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<GetJobStatsRequest, Void> PARSER = new ConstructingObjectParser<>( + "get_jobs_stats_request", a -> new GetJobStatsRequest((List<String>) a[0])); + + static { + PARSER.declareField(ConstructingObjectParser.constructorArg(), + p -> Arrays.asList(Strings.commaDelimitedListToStringArray(p.text())), + Job.ID, ObjectParser.ValueType.STRING_ARRAY); + PARSER.declareBoolean(GetJobStatsRequest::setAllowNoJobs, ALLOW_NO_JOBS); + } + + private static final String ALL_JOBS = "_all"; + + private final List<String> jobIds; + private Boolean allowNoJobs; + + /** + * Explicitly gets all jobs statistics + * + * @return a {@link GetJobStatsRequest} for all existing jobs + */ + public static GetJobStatsRequest getAllJobStatsRequest() { + return new GetJobStatsRequest(ALL_JOBS); + } + + GetJobStatsRequest(List<String> jobIds) { + if (jobIds.stream().anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values"); + } + this.jobIds = new ArrayList<>(jobIds); + } + + /** + * Get the specified Job's statistics via their unique jobIds + * + * @param jobIds must be non-null and each jobId must be non-null + */ + public GetJobStatsRequest(String... jobIds) { + this(Arrays.asList(jobIds)); + } + + /** + * All the jobIds for which to get statistics + */ + public List<String> getJobIds() { + return jobIds; + } + + public Boolean isAllowNoJobs() { + return this.allowNoJobs; + } + + /** + * Whether to ignore if a wildcard expression matches no jobs. + * + * This includes `_all` string or when no jobs have been specified + * + * @param allowNoJobs When {@code true} ignore if wildcard or `_all` matches no jobs.
Defaults to {@code true} + */ + public void setAllowNoJobs(boolean allowNoJobs) { + this.allowNoJobs = allowNoJobs; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, allowNoJobs); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + GetJobStatsRequest that = (GetJobStatsRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(allowNoJobs, that.allowNoJobs); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), Strings.collectionToCommaDelimitedString(jobIds)); + if (allowNoJobs != null) { + builder.field(ALLOW_NO_JOBS.getPreferredName(), allowNoJobs); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java new file mode 100644 index 00000000000..2e3ba113d19 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetJobStatsResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.ml.job.stats.JobStats; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Contains a {@link List} of the found {@link JobStats} objects and the total count found + */ +public class GetJobStatsResponse extends AbstractResultResponse<JobStats> { + + public static final ParseField RESULTS_FIELD = new ParseField("jobs"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<GetJobStatsResponse, Void> PARSER = + new ConstructingObjectParser<>("jobs_stats_response", true, + a -> new GetJobStatsResponse((List<JobStats>) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(constructorArg(), JobStats.PARSER, RESULTS_FIELD); + PARSER.declareLong(constructorArg(), COUNT); + } + + GetJobStatsResponse(List<JobStats> jobStats, long count) { + super(RESULTS_FIELD, jobStats, count); + } + + /** + * The collection of {@link JobStats} objects found in the query + */ + public List<JobStats> jobStats() { + return results; + } + + public static GetJobStatsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(results, count); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GetJobStatsResponse other = (GetJobStatsResponse) obj; + return Objects.equals(results, other.results) && count == other.count; + } + + @Override + public final String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java new file mode 100644 index 00000000000..892df340abd --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/NodeAttributes.java @@ -0,0 +1,150 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +/** + * A POJO class containing an Elasticsearch node's attributes + */ +public class NodeAttributes implements ToXContentObject { + + public static final ParseField ID = new ParseField("id"); + public static final ParseField NAME = new ParseField("name"); + public static final ParseField EPHEMERAL_ID = new ParseField("ephemeral_id"); + public static final ParseField TRANSPORT_ADDRESS = new ParseField("transport_address"); + public static final ParseField ATTRIBUTES = new ParseField("attributes"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser<NodeAttributes, Void> PARSER = + new ConstructingObjectParser<>("node", true, + (a) -> { + int i = 0; + String id = (String) a[i++]; + String name = (String) a[i++]; + String ephemeralId = (String) a[i++]; + String transportAddress = (String) a[i++]; + Map<String, String> attributes = (Map<String, String>) a[i]; + return new NodeAttributes(id, name, ephemeralId, transportAddress, attributes); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME); + PARSER.declareString(ConstructingObjectParser.constructorArg(), EPHEMERAL_ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), TRANSPORT_ADDRESS); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> p.mapStrings(), + ATTRIBUTES, + ObjectParser.ValueType.OBJECT); + } + + private final String id; + private final String name; + private final String ephemeralId; + private final String transportAddress; + private final Map<String, String> attributes; + + public NodeAttributes(String id, String name, String ephemeralId, String transportAddress, Map<String, String> attributes) { + this.id = id; + this.name = name; + this.ephemeralId = ephemeralId; + this.transportAddress = transportAddress; + this.attributes = Collections.unmodifiableMap(attributes); + } + + /** + * The unique identifier of the node. + */ + public String getId() { + return id; + } + + /** + * The node name. + */ + public String getName() { + return name; + } + + /** + * The ephemeral id of the node. + */ + public String getEphemeralId() { + return ephemeralId; + } + + /** + * The host and port where transport HTTP connections are accepted. + */ + public String getTransportAddress() { + return transportAddress; + } + + /** + * Additional attributes related to this node e.g., {"ml.max_open_jobs": "10"}.
+ */ + public Map<String, String> getAttributes() { + return attributes; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID.getPreferredName(), id); + builder.field(NAME.getPreferredName(), name); + builder.field(EPHEMERAL_ID.getPreferredName(), ephemeralId); + builder.field(TRANSPORT_ADDRESS.getPreferredName(), transportAddress); + builder.field(ATTRIBUTES.getPreferredName(), attributes); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(id, name, ephemeralId, transportAddress, attributes); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + NodeAttributes that = (NodeAttributes) other; + return Objects.equals(id, that.id) && + Objects.equals(name, that.name) && + Objects.equals(ephemeralId, that.ephemeralId) && + Objects.equals(transportAddress, that.transportAddress) && + Objects.equals(attributes, that.attributes); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java new file mode 100644 index 00000000000..32684bd7e62 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/JobState.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.config; + +import java.util.Locale; + +/** + * Jobs whether running or complete are in one of these states. + * When a job is created it is initialised in the state closed, + * i.e. it is not running. + */ +public enum JobState { + + CLOSING, CLOSED, OPENED, FAILED, OPENING; + + public static JobState fromString(String name) { + return valueOf(name.trim().toUpperCase(Locale.ROOT)); + } + + public String value() { + return name().toLowerCase(Locale.ROOT); + } +}
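JobState mirrors the server-side job lifecycle; fromString and value round-trip through the lower-case wire format:

```java
// A tiny illustration of the enum's string round-trip (not part of the diff itself).
JobState state = JobState.fromString("Opened"); // trimmed and upper-cased with Locale.ROOT
assert state == JobState.OPENED;
assert "opened".equals(state.value());
```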
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * A class to hold statistics about forecasts. + */ +public class ForecastStats implements ToXContentObject { + + public static final ParseField TOTAL = new ParseField("total"); + public static final ParseField FORECASTED_JOBS = new ParseField("forecasted_jobs"); + public static final ParseField MEMORY_BYTES = new ParseField("memory_bytes"); + public static final ParseField PROCESSING_TIME_MS = new ParseField("processing_time_ms"); + public static final ParseField RECORDS = new ParseField("records"); + public static final ParseField STATUS = new ParseField("status"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("forecast_stats", + true, + (a) -> { + int i = 0; + long total = (long)a[i++]; + SimpleStats memoryStats = (SimpleStats)a[i++]; + SimpleStats recordStats = (SimpleStats)a[i++]; + SimpleStats runtimeStats = (SimpleStats)a[i++]; + Map statusCounts = (Map)a[i]; + return new ForecastStats(total, memoryStats, recordStats, runtimeStats, statusCounts); + }); + + static { + PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, MEMORY_BYTES); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, RECORDS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), SimpleStats.PARSER, PROCESSING_TIME_MS); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + p -> { + Map counts = new HashMap<>(); + p.map().forEach((key, value) -> counts.put(key, ((Number)value).longValue())); + return counts; + }, STATUS, ObjectParser.ValueType.OBJECT); + } + + private final long total; + private final long forecastedJobs; + private SimpleStats memoryStats; + private SimpleStats recordStats; + private SimpleStats runtimeStats; + private Map statusCounts; + + public ForecastStats(long total, + SimpleStats memoryStats, + SimpleStats recordStats, + SimpleStats runtimeStats, + Map statusCounts) { + this.total = total; + this.forecastedJobs = total > 0 ? 
1 : 0; + if (total > 0) { + this.memoryStats = Objects.requireNonNull(memoryStats); + this.recordStats = Objects.requireNonNull(recordStats); + this.runtimeStats = Objects.requireNonNull(runtimeStats); + this.statusCounts = Collections.unmodifiableMap(statusCounts); + } + } + + /** + * The number of forecasts currently available for this model. + */ + public long getTotal() { + return total; + } + + /** + * The number of jobs that have at least one forecast. + */ + public long getForecastedJobs() { + return forecastedJobs; + } + + /** + * Statistics about the memory usage: minimum, maximum, average and total. + */ + public SimpleStats getMemoryStats() { + return memoryStats; + } + + /** + * Statistics about the number of forecast records: minimum, maximum, average and total. + */ + public SimpleStats getRecordStats() { + return recordStats; + } + + /** + * Statistics about the forecast runtime in milliseconds: minimum, maximum, average and total + */ + public SimpleStats getRuntimeStats() { + return runtimeStats; + } + + /** + * Counts per forecast status, for example: {"finished" : 2}. + */ + public Map getStatusCounts() { + return statusCounts; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(TOTAL.getPreferredName(), total); + builder.field(FORECASTED_JOBS.getPreferredName(), forecastedJobs); + + if (total > 0) { + builder.field(MEMORY_BYTES.getPreferredName(), memoryStats); + builder.field(RECORDS.getPreferredName(), recordStats); + builder.field(PROCESSING_TIME_MS.getPreferredName(), runtimeStats); + builder.field(STATUS.getPreferredName(), statusCounts); + } + return builder.endObject(); + } + + @Override + public int hashCode() { + return Objects.hash(total, forecastedJobs, memoryStats, recordStats, runtimeStats, statusCounts); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + ForecastStats other = (ForecastStats) obj; + return Objects.equals(total, other.total) && + Objects.equals(forecastedJobs, other.forecastedJobs) && + Objects.equals(memoryStats, other.memoryStats) && + Objects.equals(recordStats, other.recordStats) && + Objects.equals(runtimeStats, other.runtimeStats) && + Objects.equals(statusCounts, other.statusCounts); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java new file mode 100644 index 00000000000..df5be4aa4c5 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/JobStats.java @@ -0,0 +1,225 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
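A sketch of how the ForecastStats constructor above treats the optional sub-statistics; the memory, records and runtime values are assumed to be pre-built SimpleStats instances (the helper class introduced later in this change):

    // With no forecasts, the sub-statistics stay null and are omitted
    // from the XContent output.
    ForecastStats empty = new ForecastStats(0, null, null, null, null);
    assert empty.getTotal() == 0 && empty.getForecastedJobs() == 0;
    assert empty.getMemoryStats() == null;

    // With at least one forecast, all sub-statistics are required.
    ForecastStats stats = new ForecastStats(3, memory, records, runtime,
        Collections.singletonMap("finished", 3L));
    assert stats.getForecastedJobs() == 1;            // 1 when total > 0, else 0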
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.config.JobState; +import org.elasticsearch.client.ml.job.process.DataCounts; +import org.elasticsearch.client.ml.job.process.ModelSizeStats; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.client.ml.NodeAttributes; + +import java.io.IOException; +import java.util.Objects; + +/** + * Class containing the statistics for a Machine Learning job. + * + */ +public class JobStats implements ToXContentObject { + + private static final ParseField DATA_COUNTS = new ParseField("data_counts"); + private static final ParseField MODEL_SIZE_STATS = new ParseField("model_size_stats"); + private static final ParseField FORECASTS_STATS = new ParseField("forecasts_stats"); + private static final ParseField STATE = new ParseField("state"); + private static final ParseField NODE = new ParseField("node"); + private static final ParseField OPEN_TIME = new ParseField("open_time"); + private static final ParseField ASSIGNMENT_EXPLANATION = new ParseField("assignment_explanation"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("job_stats", + true, + (a) -> { + int i = 0; + String jobId = (String) a[i++]; + DataCounts dataCounts = (DataCounts) a[i++]; + JobState jobState = (JobState) a[i++]; + ModelSizeStats.Builder modelSizeStatsBuilder = (ModelSizeStats.Builder) a[i++]; + ModelSizeStats modelSizeStats = modelSizeStatsBuilder == null ? 
null : modelSizeStatsBuilder.build(); + ForecastStats forecastStats = (ForecastStats) a[i++]; + NodeAttributes node = (NodeAttributes) a[i++]; + String assignmentExplanation = (String) a[i++]; + TimeValue openTime = (TimeValue) a[i]; + return new JobStats(jobId, + dataCounts, + jobState, + modelSizeStats, + forecastStats, + node, + assignmentExplanation, + openTime); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), DataCounts.PARSER, DATA_COUNTS); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p) -> JobState.fromString(p.text()), + STATE, + ObjectParser.ValueType.VALUE); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ModelSizeStats.PARSER, MODEL_SIZE_STATS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), ForecastStats.PARSER, FORECASTS_STATS); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), NodeAttributes.PARSER, NODE); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ASSIGNMENT_EXPLANATION); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.textOrNull(), OPEN_TIME.getPreferredName()), + OPEN_TIME, + ObjectParser.ValueType.STRING_OR_NULL); + } + + + private final String jobId; + private final DataCounts dataCounts; + private final JobState state; + private final ModelSizeStats modelSizeStats; + private final ForecastStats forecastStats; + private final NodeAttributes node; + private final String assignmentExplanation; + private final TimeValue openTime; + + JobStats(String jobId, DataCounts dataCounts, JobState state, @Nullable ModelSizeStats modelSizeStats, + @Nullable ForecastStats forecastStats, @Nullable NodeAttributes node, + @Nullable String assignmentExplanation, @Nullable TimeValue opentime) { + this.jobId = Objects.requireNonNull(jobId); + this.dataCounts = Objects.requireNonNull(dataCounts); + this.state = Objects.requireNonNull(state); + this.modelSizeStats = modelSizeStats; + this.forecastStats = forecastStats; + this.node = node; + this.assignmentExplanation = assignmentExplanation; + this.openTime = opentime; + } + + /** + * The jobId referencing the job for these statistics + */ + public String getJobId() { + return jobId; + } + + /** + * An object that describes the number of records processed and any related error counts + * See {@link DataCounts} + */ + public DataCounts getDataCounts() { + return dataCounts; + } + + /** + * An object that provides information about the size and contents of the model. + * See {@link ModelSizeStats} + */ + public ModelSizeStats getModelSizeStats() { + return modelSizeStats; + } + + /** + * An object that provides statistical information about forecasts of this job. + * See {@link ForecastStats} + */ + public ForecastStats getForecastStats() { + return forecastStats; + } + + /** + * The status of the job + * See {@link JobState} + */ + public JobState getState() { + return state; + } + + /** + * For open jobs only, contains information about the node where the job runs + * See {@link NodeAttributes} + */ + public NodeAttributes getNode() { + return node; + } + + /** + * For open jobs only, contains messages relating to the selection of a node to run the job. 
+ */ + public String getAssignmentExplanation() { + return assignmentExplanation; + } + + /** + * For open jobs only, the elapsed time for which the job has been open + */ + public TimeValue getOpenTime() { + return openTime; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(DATA_COUNTS.getPreferredName(), dataCounts); + builder.field(STATE.getPreferredName(), state.toString()); + if (modelSizeStats != null) { + builder.field(MODEL_SIZE_STATS.getPreferredName(), modelSizeStats); + } + if (forecastStats != null) { + builder.field(FORECASTS_STATS.getPreferredName(), forecastStats); + } + if (node != null) { + builder.field(NODE.getPreferredName(), node); + } + if (assignmentExplanation != null) { + builder.field(ASSIGNMENT_EXPLANATION.getPreferredName(), assignmentExplanation); + } + if (openTime != null) { + builder.field(OPEN_TIME.getPreferredName(), openTime.getStringRep()); + } + return builder.endObject(); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, dataCounts, modelSizeStats, forecastStats, state, node, assignmentExplanation, openTime); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + JobStats other = (JobStats) obj; + return Objects.equals(jobId, other.jobId) && + Objects.equals(this.dataCounts, other.dataCounts) && + Objects.equals(this.modelSizeStats, other.modelSizeStats) && + Objects.equals(this.forecastStats, other.forecastStats) && + Objects.equals(this.state, other.state) && + Objects.equals(this.node, other.node) && + Objects.equals(this.assignmentExplanation, other.assignmentExplanation) && + Objects.equals(this.openTime, other.openTime); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java new file mode 100644 index 00000000000..f4c8aa0fa3b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/stats/SimpleStats.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
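A read-side sketch of the JobStats class above; `stats` is assumed to come from a parsed get-job-stats response, and the optional parts are only populated for jobs that are open or have forecasts:

    String jobId = stats.getJobId();
    DataCounts counts = stats.getDataCounts();        // always present
    if (stats.getState() == JobState.OPENED) {
        NodeAttributes node = stats.getNode();        // the node running the job
        TimeValue openTime = stats.getOpenTime();     // elapsed time the job has been open
    }
    if (stats.getForecastStats() != null) {
        long forecasts = stats.getForecastStats().getTotal();
    }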
+ */ +package org.elasticsearch.client.ml.job.stats; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Helper class for min, max, avg and total statistics for a quantity + */ +public class SimpleStats implements ToXContentObject { + + public static final ParseField MIN = new ParseField("min"); + public static final ParseField MAX = new ParseField("max"); + public static final ParseField AVG = new ParseField("avg"); + public static final ParseField TOTAL = new ParseField("total"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("simple_stats", true, + (a) -> { + int i = 0; + double total = (double)a[i++]; + double min = (double)a[i++]; + double max = (double)a[i++]; + double avg = (double)a[i++]; + return new SimpleStats(total, min, max, avg); + }); + + static { + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), TOTAL); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), MIN); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), MAX); + PARSER.declareDouble(ConstructingObjectParser.constructorArg(), AVG); + } + + private final double total; + private final double min; + private final double max; + private final double avg; + + SimpleStats(double total, double min, double max, double avg) { + this.total = total; + this.min = min; + this.max = max; + this.avg = avg; + } + + public double getMin() { + return min; + } + + public double getMax() { + return max; + } + + public double getAvg() { + return avg; + } + + public double getTotal() { + return total; + } + + @Override + public int hashCode() { + return Objects.hash(total, min, max, avg); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + SimpleStats other = (SimpleStats) obj; + return Objects.equals(total, other.total) && + Objects.equals(min, other.min) && + Objects.equals(avg, other.avg) && + Objects.equals(max, other.max); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MIN.getPreferredName(), min); + builder.field(MAX.getPreferredName(), max); + builder.field(AVG.getPreferredName(), avg); + builder.field(TOTAL.getPreferredName(), total); + builder.endObject(); + return builder; + } +} + diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 7978d76c56d..e02d9f451eb 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -51,6 +51,7 @@ import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -691,6 +692,72 @@ public class CrudIT extends ESRestHighLevelClientTestCase { } } + public void testUpdateByQuery() throws 
IOException { + final String sourceIndex = "source1"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex, "type", "1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "type", "2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: create one doc in dest + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1").types("type")); + updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + { + // test2: update using script + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setScript(new Script("if (ctx._source.foo == 2) ctx._source.foo++;")); + updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(2, bulkResponse.getTotal()); + assertEquals(2, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertEquals( + 3, + (int) (highLevelClient().get(new GetRequest(sourceIndex, "type", "2"), RequestOptions.DEFAULT) + .getSourceAsMap().get("foo")) + ); + } + } + public void testBulkProcessorIntegration() throws IOException { int nbItems = randomIntBetween(10, 100); boolean[] errors = new boolean[nbItems]; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 43f3ef41a8d..d84099d9a3c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -36,6 +36,8 @@ import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.GetJobStatsRequest; import 
org.elasticsearch.test.ESTestCase; import java.io.ByteArrayOutputStream; @@ -139,6 +141,44 @@ public class MLRequestConvertersTests extends ESTestCase { } } + public void testFlushJob() throws Exception { + String jobId = randomAlphaOfLength(10); + FlushJobRequest flushJobRequest = new FlushJobRequest(jobId); + + Request request = MLRequestConverters.flushJob(flushJobRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/_flush", request.getEndpoint()); + assertEquals("{\"job_id\":\"" + jobId + "\"}", requestEntityToString(request)); + + flushJobRequest.setSkipTime("1000"); + flushJobRequest.setStart("105"); + flushJobRequest.setEnd("200"); + flushJobRequest.setAdvanceTime("100"); + flushJobRequest.setCalcInterim(true); + request = MLRequestConverters.flushJob(flushJobRequest); + assertEquals( + "{\"job_id\":\"" + jobId + "\",\"calc_interim\":true,\"start\":\"105\"," + + "\"end\":\"200\",\"advance_time\":\"100\",\"skip_time\":\"1000\"}", + requestEntityToString(request)); + } + + public void testGetJobStats() { + GetJobStatsRequest getJobStatsRequestRequest = new GetJobStatsRequest(); + + Request request = MLRequestConverters.getJobStats(getJobStatsRequestRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/_stats", request.getEndpoint()); + assertFalse(request.getParameters().containsKey("allow_no_jobs")); + + getJobStatsRequestRequest = new GetJobStatsRequest("job1", "jobs*"); + getJobStatsRequestRequest.setAllowNoJobs(true); + request = MLRequestConverters.getJobStats(getJobStatsRequestRequest); + + assertEquals("/_xpack/ml/anomaly_detectors/job1,jobs*/_stats", request.getEndpoint()); + assertEquals(Boolean.toString(true), request.getParameters().get("allow_no_jobs")); + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index cb9dbea129d..cd4b6ffc769 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -19,6 +19,12 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.job.config.JobState; +import org.elasticsearch.client.ml.job.stats.JobStats; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; import org.elasticsearch.client.ml.DeleteJobRequest; @@ -34,6 +40,8 @@ import org.elasticsearch.client.ml.job.config.DataDescription; import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; import org.junit.After; import java.io.IOException; @@ -41,6 +49,7 @@ import java.util.Arrays; import java.util.concurrent.TimeUnit; import 
java.util.stream.Collectors; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasSize; @@ -138,6 +147,77 @@ public class MachineLearningIT extends ESRestHighLevelClientTestCase { assertTrue(response.isClosed()); } + public void testFlushJob() throws Exception { + String jobId = randomValidJobId(); + Job job = buildJob(jobId); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + machineLearningClient.openJob(new OpenJobRequest(jobId), RequestOptions.DEFAULT); + + FlushJobResponse response = execute(new FlushJobRequest(jobId), + machineLearningClient::flushJob, + machineLearningClient::flushJobAsync); + assertTrue(response.isFlushed()); + } + + public void testGetJobStats() throws Exception { + String jobId1 = "ml-get-job-stats-test-id-1"; + String jobId2 = "ml-get-job-stats-test-id-2"; + + Job job1 = buildJob(jobId1); + Job job2 = buildJob(jobId2); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putJob(new PutJobRequest(job1), RequestOptions.DEFAULT); + machineLearningClient.putJob(new PutJobRequest(job2), RequestOptions.DEFAULT); + + machineLearningClient.openJob(new OpenJobRequest(jobId1), RequestOptions.DEFAULT); + + GetJobStatsRequest request = new GetJobStatsRequest(jobId1, jobId2); + + // Test getting specific + GetJobStatsResponse response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertEquals(2, response.count()); + assertThat(response.jobStats(), hasSize(2)); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), containsInAnyOrder(jobId1, jobId2)); + for (JobStats stats : response.jobStats()) { + if (stats.getJobId().equals(jobId1)) { + assertEquals(JobState.OPENED, stats.getState()); + } else { + assertEquals(JobState.CLOSED, stats.getState()); + } + } + + // Test getting all explicitly + request = GetJobStatsRequest.getAllJobStatsRequest(); + response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all implicitly + response = execute(new GetJobStatsRequest(), machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test getting all with wildcard + request = new GetJobStatsRequest("ml-get-job-stats-test-id-*"); + response = execute(request, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync); + assertTrue(response.count() >= 2L); + assertTrue(response.jobStats().size() >= 2L); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), hasItems(jobId1, jobId2)); + + // Test when allow_no_jobs is false + final GetJobStatsRequest erroredRequest = new GetJobStatsRequest("jobs-that-do-not-exist*"); + erroredRequest.setAllowNoJobs(false); + ElasticsearchStatusException exception = 
expectThrows(ElasticsearchStatusException.class, + () -> execute(erroredRequest, machineLearningClient::getJobStats, machineLearningClient::getJobStatsAsync)); + assertThat(exception.status().getStatus(), equalTo(404)); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 4a318b8ddff..f814a80374c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -132,6 +132,7 @@ import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RestRankEvalAction; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.index.reindex.RemoteInfo; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.ExplainLifecycleRequest; import org.elasticsearch.protocol.xpack.indexlifecycle.SetIndexLifecyclePolicyRequest; @@ -144,6 +145,7 @@ import org.elasticsearch.protocol.xpack.graph.Hop; import org.elasticsearch.protocol.xpack.watcher.PutWatchRequest; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; import org.elasticsearch.script.mustache.SearchTemplateRequest; @@ -478,6 +480,60 @@ public class RequestConvertersTests extends ESTestCase { assertToXContentBody(reindexRequest, request.getEntity()); } + public void testUpdateByQuery() throws IOException { + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(randomIndicesNames(1, 5)); + Map expectedParams = new HashMap<>(); + if (randomBoolean()) { + updateByQueryRequest.setDocTypes(generateRandomStringArray(5, 5, false, false)); + } + if (randomBoolean()) { + int batchSize = randomInt(100); + updateByQueryRequest.setBatchSize(batchSize); + expectedParams.put("scroll_size", Integer.toString(batchSize)); + } + if (randomBoolean()) { + updateByQueryRequest.setPipeline("my_pipeline"); + expectedParams.put("pipeline", "my_pipeline"); + } + if (randomBoolean()) { + updateByQueryRequest.setRouting("=cat"); + expectedParams.put("routing", "=cat"); + } + if (randomBoolean()) { + int size = randomIntBetween(100, 1000); + updateByQueryRequest.setSize(size); + expectedParams.put("size", Integer.toString(size)); + } + if (randomBoolean()) { + updateByQueryRequest.setAbortOnVersionConflict(false); + expectedParams.put("conflicts", "proceed"); + } + if (randomBoolean()) { + String ts = randomTimeValue(); + updateByQueryRequest.setScroll(TimeValue.parseTimeValue(ts, "scroll")); + expectedParams.put("scroll", ts); + } + if (randomBoolean()) { + updateByQueryRequest.setQuery(new TermQueryBuilder("foo", "fooval")); + } + if (randomBoolean()) { + updateByQueryRequest.setScript(new Script("ctx._source.last = \"lastname\"")); + } + setRandomIndicesOptions(updateByQueryRequest::setIndicesOptions, updateByQueryRequest::indicesOptions, 
expectedParams); + setRandomTimeout(updateByQueryRequest::setTimeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams); + Request request = RequestConverters.updateByQuery(updateByQueryRequest); + StringJoiner joiner = new StringJoiner("/", "/", ""); + joiner.add(String.join(",", updateByQueryRequest.indices())); + if (updateByQueryRequest.getDocTypes().length > 0) + joiner.add(String.join(",", updateByQueryRequest.getDocTypes())); + joiner.add("_update_by_query"); + assertEquals(joiner.toString(), request.getEndpoint()); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(updateByQueryRequest, request.getEntity()); + } + public void testPutMapping() throws IOException { PutMappingRequest putMappingRequest = new PutMappingRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index a0319573eb9..d4dac287d99 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -664,8 +664,7 @@ public class RestHighLevelClientTests extends ESTestCase { "render_search_template", "scripts_painless_execute", "tasks.get", - "termvectors", - "update_by_query" + "termvectors" }; //These API are not required for high-level client feature completeness String[] notRequiredApi = new String[] { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 9c69a2a4836..ac9d42c65ca 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.replication.ReplicationResponse; @@ -67,6 +68,7 @@ import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.index.reindex.RemoteInfo; import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -899,6 +901,125 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testUpdateByQuery() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + String mapping = + "\"doc\": {\n" + + " \"properties\": {\n" + + " \"user\": {\n" + + " \"type\": \"text\"\n" + + " },\n" + + " \"field1\": {\n" + + " \"type\": \"integer\"\n" + + " },\n" + + " \"field2\": {\n" + + " \"type\": \"integer\"\n" + + " }\n" + + " }\n" + + " }"; + createIndex("source1", Settings.EMPTY, mapping); + createIndex("source2", Settings.EMPTY, mapping); 
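A quick sketch (hypothetical index and type names, imports elided) of the endpoint shape the RequestConverters test above asserts: comma-joined indices, then the optional doc types, then the _update_by_query suffix:

    StringJoiner endpoint = new StringJoiner("/", "/", "");
    endpoint.add(String.join(",", "index1", "index2"));
    endpoint.add("doc");
    endpoint.add("_update_by_query");
    assert endpoint.toString().equals("/index1,index2/doc/_update_by_query");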
+ createPipeline("my_pipeline"); + } + { + // tag::update-by-query-request + UpdateByQueryRequest request = new UpdateByQueryRequest("source1", "source2"); // <1> + // end::update-by-query-request + // tag::update-by-query-request-conflicts + request.setConflicts("proceed"); // <1> + // end::update-by-query-request-conflicts + // tag::update-by-query-request-typeOrQuery + request.setDocTypes("doc"); // <1> + request.setQuery(new TermQueryBuilder("user", "kimchy")); // <2> + // end::update-by-query-request-typeOrQuery + // tag::update-by-query-request-size + request.setSize(10); // <1> + // end::update-by-query-request-size + // tag::update-by-query-request-scrollSize + request.setBatchSize(100); // <1> + // end::update-by-query-request-scrollSize + // tag::update-by-query-request-pipeline + request.setPipeline("my_pipeline"); // <1> + // end::update-by-query-request-pipeline + // tag::update-by-query-request-script + request.setScript( + new Script( + ScriptType.INLINE, "painless", + "if (ctx._source.user == 'kimchy') {ctx._source.likes++;}", + Collections.emptyMap())); // <1> + // end::update-by-query-request-script + // tag::update-by-query-request-timeout + request.setTimeout(TimeValue.timeValueMinutes(2)); // <1> + // end::update-by-query-request-timeout + // tag::update-by-query-request-refresh + request.setRefresh(true); // <1> + // end::update-by-query-request-refresh + // tag::update-by-query-request-slices + request.setSlices(2); // <1> + // end::update-by-query-request-slices + // tag::update-by-query-request-scroll + request.setScroll(TimeValue.timeValueMinutes(10)); // <1> + // end::update-by-query-request-scroll + // tag::update-by-query-request-routing + request.setRouting("=cat"); // <1> + // end::update-by-query-request-routing + // tag::update-by-query-request-indicesOptions + request.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); // <1> + // end::update-by-query-request-indicesOptions + + // tag::update-by-query-execute + BulkByScrollResponse bulkResponse = client.updateByQuery(request, RequestOptions.DEFAULT); + // end::update-by-query-execute + assertSame(0, bulkResponse.getSearchFailures().size()); + assertSame(0, bulkResponse.getBulkFailures().size()); + // tag::update-by-query-response + TimeValue timeTaken = bulkResponse.getTook(); // <1> + boolean timedOut = bulkResponse.isTimedOut(); // <2> + long totalDocs = bulkResponse.getTotal(); // <3> + long updatedDocs = bulkResponse.getUpdated(); // <4> + long deletedDocs = bulkResponse.getDeleted(); // <5> + long batches = bulkResponse.getBatches(); // <6> + long noops = bulkResponse.getNoops(); // <7> + long versionConflicts = bulkResponse.getVersionConflicts(); // <8> + long bulkRetries = bulkResponse.getBulkRetries(); // <9> + long searchRetries = bulkResponse.getSearchRetries(); // <10> + TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // <11> + TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // <12> + List searchFailures = bulkResponse.getSearchFailures(); // <13> + List bulkFailures = bulkResponse.getBulkFailures(); // <14> + // end::update-by-query-response + } + { + UpdateByQueryRequest request = new UpdateByQueryRequest(); + request.indices("source1"); + + // tag::update-by-query-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(BulkByScrollResponse bulkResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::update-by-query-execute-listener + + // 
Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::update-by-query-execute-async + client.updateByQueryAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::update-by-query-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGet() throws Exception { RestHighLevelClient client = highLevelClient(); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 94793f0ab79..f92f01f6bad 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -35,6 +35,8 @@ import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; import org.elasticsearch.client.ml.GetJobRequest; import org.elasticsearch.client.ml.GetJobResponse; +import org.elasticsearch.client.ml.GetJobStatsRequest; +import org.elasticsearch.client.ml.GetJobStatsResponse; import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.GetRecordsResponse; import org.elasticsearch.client.ml.OpenJobRequest; @@ -50,6 +52,9 @@ import org.elasticsearch.client.ml.job.results.Bucket; import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.client.ml.FlushJobRequest; +import org.elasticsearch.client.ml.FlushJobResponse; +import org.elasticsearch.client.ml.job.stats.JobStats; import org.junit.After; import java.io.IOException; @@ -458,6 +463,127 @@ public class MlClientDocumentationIT extends ESRestHighLevelClientTestCase { } } + public void testFlushJob() throws Exception { + RestHighLevelClient client = highLevelClient(); + + Job job = MachineLearningIT.buildJob("flushing-my-first-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(job.getId()), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("flushing-my-second-machine-learning-job"); + client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + client.machineLearning().openJob(new OpenJobRequest(secondJob.getId()), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-flush-job-request + FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-first-machine-learning-job"); //<1> + //end::x-pack-ml-flush-job-request + + //tag::x-pack-ml-flush-job-request-options + flushJobRequest.setCalcInterim(true); //<1> + flushJobRequest.setAdvanceTime("2018-08-31T16:35:07+00:00"); //<2> + flushJobRequest.setStart("2018-08-31T16:35:17+00:00"); //<3> + flushJobRequest.setEnd("2018-08-31T16:35:27+00:00"); //<4> + flushJobRequest.setSkipTime("2018-08-31T16:35:00+00:00"); //<5> + //end::x-pack-ml-flush-job-request-options + + //tag::x-pack-ml-flush-job-execute + FlushJobResponse flushJobResponse = client.machineLearning().flushJob(flushJobRequest, RequestOptions.DEFAULT); + //end::x-pack-ml-flush-job-execute + + //tag::x-pack-ml-flush-job-response + boolean isFlushed = flushJobResponse.isFlushed(); //<1> + Date 
lastFinalizedBucketEnd = flushJobResponse.getLastFinalizedBucketEnd(); //<2> + //end::x-pack-ml-flush-job-response + + } + { + //tag::x-pack-ml-flush-job-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(FlushJobResponse FlushJobResponse) { + //<1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + //end::x-pack-ml-flush-job-listener + FlushJobRequest flushJobRequest = new FlushJobRequest("flushing-my-second-machine-learning-job"); + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-flush-job-execute-async + client.machineLearning().flushJobAsync(flushJobRequest, RequestOptions.DEFAULT, listener); //<1> + // end::x-pack-ml-flush-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + + public void testGetJobStats() throws Exception { + RestHighLevelClient client = highLevelClient(); + + Job job = MachineLearningIT.buildJob("get-machine-learning-job-stats1"); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + Job secondJob = MachineLearningIT.buildJob("get-machine-learning-job-stats2"); + client.machineLearning().putJob(new PutJobRequest(secondJob), RequestOptions.DEFAULT); + + { + //tag::x-pack-ml-get-job-stats-request + GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*"); //<1> + request.setAllowNoJobs(true); //<2> + //end::x-pack-ml-get-job-stats-request + + //tag::x-pack-ml-get-job-stats-execute + GetJobStatsResponse response = client.machineLearning().getJobStats(request, RequestOptions.DEFAULT); + //end::x-pack-ml-get-job-stats-execute + + //tag::x-pack-ml-get-job-stats-response + long numberOfJobStats = response.count(); //<1> + List jobStats = response.jobStats(); //<2> + //end::x-pack-ml-get-job-stats-response + + assertEquals(2, response.count()); + assertThat(response.jobStats(), hasSize(2)); + assertThat(response.jobStats().stream().map(JobStats::getJobId).collect(Collectors.toList()), + containsInAnyOrder(job.getId(), secondJob.getId())); + } + { + GetJobStatsRequest request = new GetJobStatsRequest("get-machine-learning-job-stats1", "get-machine-learning-job-*"); + + // tag::x-pack-ml-get-job-stats-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(GetJobStatsResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-ml-get-job-stats-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-ml-get-job-stats-execute-async + client.machineLearning().getJobStatsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-ml-get-job-stats-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGetRecords() throws IOException, InterruptedException { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java new file mode 100644 index 00000000000..c2bddd436cc --- /dev/null +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobRequestTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class FlushJobRequestTests extends AbstractXContentTestCase { + + @Override + protected FlushJobRequest createTestInstance() { + FlushJobRequest request = new FlushJobRequest(randomAlphaOfLengthBetween(1, 20)); + + if (randomBoolean()) { + request.setCalcInterim(randomBoolean()); + } + if (randomBoolean()) { + request.setAdvanceTime(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setStart(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setEnd(String.valueOf(randomLong())); + } + if (randomBoolean()) { + request.setSkipTime(String.valueOf(randomLong())); + } + return request; + } + + @Override + protected FlushJobRequest doParseInstance(XContentParser parser) throws IOException { + return FlushJobRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java new file mode 100644 index 00000000000..bc968ff4564 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/FlushJobResponseTests.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
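FlushJobRequestTests above, like the other new request and response tests in this change, extends AbstractXContentTestCase. Roughly, the harness drives the following round-trip for every random instance (a simplified sketch, not the real base-class code; the harness also shuffles field order and, when supportsUnknownFields() is true, injects random extra fields before re-parsing):

    FlushJobRequest original = createTestInstance();
    XContentBuilder builder = original.toXContent(JsonXContent.contentBuilder(), ToXContent.EMPTY_PARAMS);
    FlushJobRequest parsed = doParseInstance(createParser(builder));
    assertEquals(original, parsed);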
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Date; + +public class FlushJobResponseTests extends AbstractXContentTestCase { + + @Override + protected FlushJobResponse createTestInstance() { + return new FlushJobResponse(randomBoolean(), + randomBoolean() ? null : new Date(randomNonNegativeLong())); + } + + @Override + protected FlushJobResponse doParseInstance(XContentParser parser) throws IOException { + return FlushJobResponse.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java index 181804c9676..8cc990730f7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobResponseTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.function.Predicate; public class GetJobResponseTests extends AbstractXContentTestCase { @@ -46,8 +47,13 @@ public class GetJobResponseTests extends AbstractXContentTestCase getRandomFieldsExcludeFilter() { + return field -> !field.isEmpty(); + } + @Override protected boolean supportsUnknownFields() { - return false; + return true; } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java new file mode 100644 index 00000000000..690e5829766 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsRequestTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
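The GetJobResponseTests change above flips supportsUnknownFields() to true and supplies an exclude filter. The predicate marks field paths where the test framework must not inject random unknown fields; only the root (the empty path) stays open, since fields injected inside nested objects such as plain maps would be absorbed into the parsed map and break the post-parse equality check:

    Predicate<String> excludeFilter = field -> !field.isEmpty();   // exclude every non-root path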
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetJobStatsRequestTests extends AbstractXContentTestCase { + + public void testAllJobsRequest() { + GetJobStatsRequest request = GetJobStatsRequest.getAllJobStatsRequest(); + + assertEquals(request.getJobIds().size(), 1); + assertEquals(request.getJobIds().get(0), "_all"); + } + + public void testNewWithJobId() { + Exception exception = expectThrows(NullPointerException.class, () -> new GetJobStatsRequest("job", null)); + assertEquals(exception.getMessage(), "jobIds must not contain null values"); + } + + @Override + protected GetJobStatsRequest createTestInstance() { + int jobCount = randomIntBetween(0, 10); + List jobIds = new ArrayList<>(jobCount); + + for (int i = 0; i < jobCount; i++) { + jobIds.add(randomAlphaOfLength(10)); + } + + GetJobStatsRequest request = new GetJobStatsRequest(jobIds); + + if (randomBoolean()) { + request.setAllowNoJobs(randomBoolean()); + } + + return request; + } + + @Override + protected GetJobStatsRequest doParseInstance(XContentParser parser) throws IOException { + return GetJobStatsRequest.PARSER.parse(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java new file mode 100644 index 00000000000..23f7bcc042b --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetJobStatsResponseTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
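As the first test above asserts, getAllJobStatsRequest() is shorthand for the reserved "_all" job id; a one-line sketch:

    GetJobStatsRequest all = GetJobStatsRequest.getAllJobStatsRequest();
    assert all.getJobIds().equals(Collections.singletonList("_all"));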
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.client.ml.job.stats.JobStatsTests; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetJobStatsResponseTests extends AbstractXContentTestCase { + + @Override + protected GetJobStatsResponse createTestInstance() { + + int count = randomIntBetween(1, 5); + List results = new ArrayList<>(count); + for(int i = 0; i < count; i++) { + results.add(JobStatsTests.createRandomInstance()); + } + + return new GetJobStatsResponse(results, count); + } + + @Override + protected GetJobStatsResponse doParseInstance(XContentParser parser) throws IOException { + return GetJobStatsResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java new file mode 100644 index 00000000000..cee1710a622 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/NodeAttributesTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
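A consumption sketch for the response class exercised above; `parser` is assumed to be positioned on a get-job-stats response body:

    GetJobStatsResponse response = GetJobStatsResponse.fromXContent(parser);
    long count = response.count();                    // number of matching jobs
    List<JobStats> stats = response.jobStats();       // one entry per job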
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Predicate;
+
+public class NodeAttributesTests extends AbstractXContentTestCase<NodeAttributes> {
+
+    public static NodeAttributes createRandom() {
+        int numberOfAttributes = randomIntBetween(1, 10);
+        Map<String, String> attributes = new HashMap<>(numberOfAttributes);
+        for (int i = 0; i < numberOfAttributes; i++) {
+            String val = randomAlphaOfLength(10);
+            attributes.put("key-" + i, val);
+        }
+        return new NodeAttributes(randomAlphaOfLength(10),
+            randomAlphaOfLength(10),
+            randomAlphaOfLength(10),
+            randomAlphaOfLength(10),
+            attributes);
+    }
+
+    @Override
+    protected NodeAttributes createTestInstance() {
+        return createRandom();
+    }
+
+    @Override
+    protected NodeAttributes doParseInstance(XContentParser parser) throws IOException {
+        return NodeAttributes.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java
new file mode 100644
index 00000000000..16dfa305479
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/ForecastStatsTests.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.stats;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Predicate;
+
+public class ForecastStatsTests extends AbstractXContentTestCase<ForecastStats> {
+
+    @Override
+    public ForecastStats createTestInstance() {
+        if (randomBoolean()) {
+            return createRandom(1, 22);
+        }
+        return new ForecastStats(0, null, null, null, null);
+    }
+
+    @Override
+    protected ForecastStats doParseInstance(XContentParser parser) throws IOException {
+        return ForecastStats.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
+    public static ForecastStats createRandom(long minTotal, long maxTotal) {
+        return new ForecastStats(
+            randomLongBetween(minTotal, maxTotal),
+            SimpleStatsTests.createRandom(),
+            SimpleStatsTests.createRandom(),
+            SimpleStatsTests.createRandom(),
+            createCountStats());
+    }
+
+    private static Map<String, Long> createCountStats() {
+        Map<String, Long> countStats = new HashMap<>();
+        for (int i = 0; i < randomInt(10); ++i) {
+            countStats.put(randomAlphaOfLengthBetween(1, 20), randomLongBetween(1L, 100L));
+        }
+        return countStats;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java
new file mode 100644
index 00000000000..5d00f879140
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/JobStatsTests.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.stats;
+
+import org.elasticsearch.client.ml.NodeAttributes;
+import org.elasticsearch.client.ml.NodeAttributesTests;
+import org.elasticsearch.client.ml.job.process.DataCounts;
+import org.elasticsearch.client.ml.job.process.DataCountsTests;
+import org.elasticsearch.client.ml.job.process.ModelSizeStats;
+import org.elasticsearch.client.ml.job.process.ModelSizeStatsTests;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.client.ml.job.config.JobState;
+import org.elasticsearch.client.ml.job.config.JobTests;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.function.Predicate;
+
+
+public class JobStatsTests extends AbstractXContentTestCase<JobStats> {
+
+    public static JobStats createRandomInstance() {
+        String jobId = JobTests.randomValidJobId();
+        JobState state = randomFrom(JobState.CLOSING, JobState.CLOSED, JobState.OPENED, JobState.FAILED, JobState.OPENING);
+        DataCounts dataCounts = DataCountsTests.createTestInstance(jobId);
+
+        ModelSizeStats modelSizeStats = randomBoolean() ? ModelSizeStatsTests.createRandomized() : null;
+        ForecastStats forecastStats = randomBoolean() ? ForecastStatsTests.createRandom(1, 22) : null;
+        NodeAttributes nodeAttributes = randomBoolean() ? NodeAttributesTests.createRandom() : null;
+        String assignmentExplanation = randomBoolean() ? randomAlphaOfLength(10) : null;
+        TimeValue openTime = randomBoolean() ? TimeValue.timeValueMillis(randomIntBetween(1, 10000)) : null;
+
+        return new JobStats(jobId, dataCounts, state, modelSizeStats, forecastStats, nodeAttributes, assignmentExplanation, openTime);
+    }
+
+    @Override
+    protected JobStats createTestInstance() {
+        return createRandomInstance();
+    }
+
+    @Override
+    protected JobStats doParseInstance(XContentParser parser) throws IOException {
+        return JobStats.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected Predicate<String> getRandomFieldsExcludeFilter() {
+        return field -> !field.isEmpty();
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java
new file mode 100644
index 00000000000..eb9e47af9ba
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/stats/SimpleStatsTests.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml.job.stats;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+
+public class SimpleStatsTests extends AbstractXContentTestCase<SimpleStats> {
+
+    @Override
+    protected SimpleStats createTestInstance() {
+        return createRandom();
+    }
+
+    @Override
+    protected SimpleStats doParseInstance(XContentParser parser) throws IOException {
+        return SimpleStats.PARSER.parse(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return true;
+    }
+
+    public static SimpleStats createRandom() {
+        return new SimpleStats(randomDouble(), randomDouble(), randomDouble(), randomDouble());
+    }
+}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
index 934b9526086..a7afbc8ffbd 100644
--- a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
@@ -85,7 +85,7 @@ import static java.util.Collections.singletonList;
 * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
 * by calling {@link #setNodes(Collection)}.
 * <p>
- * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When + * The method {@link #performRequest(Request)} allows to send a request to the cluster. When * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that @@ -145,17 +145,6 @@ public class RestClient implements Closeable { return new RestClientBuilder(hostsToNodes(hosts)); } - /** - * Replaces the hosts with which the client communicates. - * - * @deprecated prefer {@link #setNodes(Collection)} because it allows you - * to set metadata for use with {@link NodeSelector}s - */ - @Deprecated - public void setHosts(HttpHost... hosts) { - setNodes(hostsToNodes(hosts)); - } - /** * Replaces the nodes with which the client communicates. */ @@ -251,234 +240,6 @@ public class RestClient implements Closeable { } } - /** - * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response - * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters - * and request body. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param headers the optional request headers - * @return the response returned by Elasticsearch - * @throws IOException in case of a problem or the connection was aborted - * @throws ClientProtocolException in case of an http protocol error - * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error - * @deprecated prefer {@link #performRequest(Request)} - */ - @Deprecated - public Response performRequest(String method, String endpoint, Header... headers) throws IOException { - Request request = new Request(method, endpoint); - addHeaders(request, headers); - return performRequest(request); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response - * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param headers the optional request headers - * @return the response returned by Elasticsearch - * @throws IOException in case of a problem or the connection was aborted - * @throws ClientProtocolException in case of an http protocol error - * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error - * @deprecated prefer {@link #performRequest(Request)} - */ - @Deprecated - public Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { - Request request = new Request(method, endpoint); - addParameters(request, params); - addHeaders(request, headers); - return performRequest(request); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response - * to be returned. 
Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, Header...)} - * which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance, - * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param entity the body of the request, null if not applicable - * @param headers the optional request headers - * @return the response returned by Elasticsearch - * @throws IOException in case of a problem or the connection was aborted - * @throws ClientProtocolException in case of an http protocol error - * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error - * @deprecated prefer {@link #performRequest(Request)} - */ - @Deprecated - public Response performRequest(String method, String endpoint, Map params, - HttpEntity entity, Header... headers) throws IOException { - Request request = new Request(method, endpoint); - addParameters(request, params); - request.setEntity(entity); - addHeaders(request, headers); - return performRequest(request); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. Blocks until the request is completed and returns - * its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts - * are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times - * they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead - * nodes that deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown. - * - * This method works by performing an asynchronous call and waiting - * for the result. If the asynchronous call throws an exception we wrap - * it and rethrow it so that the stack trace attached to the exception - * contains the call site. While we attempt to preserve the original - * exception this isn't always possible and likely haven't covered all of - * the cases. You can get the original exception from - * {@link Exception#getCause()}. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param entity the body of the request, null if not applicable - * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP - * connection on the client side. - * @param headers the optional request headers - * @return the response returned by Elasticsearch - * @throws IOException in case of a problem or the connection was aborted - * @throws ClientProtocolException in case of an http protocol error - * @throws ResponseException in case Elasticsearch responded with a status code that indicated an error - * @deprecated prefer {@link #performRequest(Request)} - */ - @Deprecated - public Response performRequest(String method, String endpoint, Map params, - HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, - Header... 
headers) throws IOException { - Request request = new Request(method, endpoint); - addParameters(request, params); - request.setEntity(entity); - setOptions(request, httpAsyncResponseConsumerFactory, headers); - return performRequest(request); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead - * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to - * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails - * @param headers the optional request headers - * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)} - */ - @Deprecated - public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) { - Request request; - try { - request = new Request(method, endpoint); - addHeaders(request, headers); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - performRequestAsync(request, responseListener); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead - * the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to - * {@link #performRequestAsync(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails - * @param headers the optional request headers - * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)} - */ - @Deprecated - public void performRequestAsync(String method, String endpoint, Map params, - ResponseListener responseListener, Header... headers) { - Request request; - try { - request = new Request(method, endpoint); - addParameters(request, params); - addHeaders(request, headers); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - performRequestAsync(request, responseListener); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. Doesn't wait for the response, instead - * the provided {@link ResponseListener} will be notified upon completion or failure. - * Shortcut to {@link #performRequestAsync(String, String, Map, HttpEntity, HttpAsyncResponseConsumerFactory, ResponseListener, - * Header...)} which doesn't require specifying an {@link HttpAsyncResponseConsumerFactory} instance, - * {@link HttpAsyncResponseConsumerFactory} will be used to create the needed instances of {@link HttpAsyncResponseConsumer}. 
- * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param entity the body of the request, null if not applicable - * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails - * @param headers the optional request headers - * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)} - */ - @Deprecated - public void performRequestAsync(String method, String endpoint, Map params, - HttpEntity entity, ResponseListener responseListener, Header... headers) { - Request request; - try { - request = new Request(method, endpoint); - addParameters(request, params); - request.setEntity(entity); - addHeaders(request, headers); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - performRequestAsync(request, responseListener); - } - - /** - * Sends a request to the Elasticsearch cluster that the client points to. The request is executed asynchronously - * and the provided {@link ResponseListener} gets notified upon request completion or failure. - * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain - * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures, - * the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried - * until one responds or none of them does, in which case an {@link IOException} will be thrown. - * - * @param method the http method - * @param endpoint the path of the request (without host and port) - * @param params the query_string parameters - * @param entity the body of the request, null if not applicable - * @param httpAsyncResponseConsumerFactory the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the response body gets streamed from a non-blocking HTTP - * connection on the client side. - * @param responseListener the {@link ResponseListener} to notify when the request is completed or fails - * @param headers the optional request headers - * @deprecated prefer {@link #performRequestAsync(Request, ResponseListener)} - */ - @Deprecated - public void performRequestAsync(String method, String endpoint, Map params, - HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, - ResponseListener responseListener, Header... headers) { - Request request; - try { - request = new Request(method, endpoint); - addParameters(request, params); - request.setEntity(entity); - setOptions(request, httpAsyncResponseConsumerFactory, headers); - } catch (Exception e) { - responseListener.onFailure(e); - return; - } - performRequestAsync(request, responseListener); - } - void performRequestAsyncNoCatch(Request request, ResponseListener listener) throws IOException { Map requestParams = new HashMap<>(request.getParameters()); //ignore is a special parameter supported by the clients, shouldn't be sent to es @@ -1035,42 +796,4 @@ public class RestClient implements Closeable { itr.remove(); } } - - /** - * Add all headers from the provided varargs argument to a {@link Request}. This only exists - * to support methods that exist for backwards compatibility. - */ - @Deprecated - private static void addHeaders(Request request, Header... 
headers) { - setOptions(request, RequestOptions.DEFAULT.getHttpAsyncResponseConsumerFactory(), headers); - } - - /** - * Add all headers from the provided varargs argument to a {@link Request}. This only exists - * to support methods that exist for backwards compatibility. - */ - @Deprecated - private static void setOptions(Request request, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, - Header... headers) { - Objects.requireNonNull(headers, "headers cannot be null"); - RequestOptions.Builder options = request.getOptions().toBuilder(); - for (Header header : headers) { - Objects.requireNonNull(header, "header cannot be null"); - options.addHeader(header.getName(), header.getValue()); - } - options.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory); - request.setOptions(options); - } - - /** - * Add all parameters from a map to a {@link Request}. This only exists - * to support methods that exist for backwards compatibility. - */ - @Deprecated - private static void addParameters(Request request, Map parameters) { - Objects.requireNonNull(parameters, "parameters cannot be null"); - for (Map.Entry entry : parameters.entrySet()) { - request.addParameter(entry.getKey(), entry.getValue()); - } - } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 6b5bb3c98ee..fb58f18d42a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -45,7 +45,6 @@ import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -215,9 +214,15 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase { } final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); + Request request = new Request(method, "/" + statusCode); + RequestOptions.Builder options = request.getOptions().toBuilder(); + for (Header header : requestHeaders) { + options.addHeader(header.getName(), header.getValue()); + } + request.setOptions(options); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), requestHeaders); + esResponse = restClient.performRequest(request); } catch (ResponseException e) { esResponse = e.getResponse(); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index cb326f4a24c..0c589e6a40c 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -59,7 +59,6 @@ import java.net.URI; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; -import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -69,7 +68,6 @@ import static java.util.Collections.singletonList; import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static 
org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; -import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode; import static org.elasticsearch.client.SyncResponseListenerTests.assertExceptionStackContainsCallingMethod; import static org.hamcrest.CoreMatchers.equalTo; @@ -192,7 +190,7 @@ public class RestClientSingleHostTests extends RestClientTestCase { public void testOkStatusCodes() throws IOException { for (String method : getHttpMethods()) { for (int okStatusCode : getOkStatusCodes()) { - Response response = performRequest(method, "/" + okStatusCode); + Response response = restClient.performRequest(new Request(method, "/" + okStatusCode)); assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); } } @@ -223,13 +221,11 @@ public class RestClientSingleHostTests extends RestClientTestCase { //error status codes should cause an exception to be thrown for (int errorStatusCode : getAllErrorStatusCodes()) { try { - Map params; - if (ignoreParam.isEmpty()) { - params = Collections.emptyMap(); - } else { - params = Collections.singletonMap("ignore", ignoreParam); + Request request = new Request(method, "/" + errorStatusCode); + if (false == ignoreParam.isEmpty()) { + request.addParameter("ignore", ignoreParam); } - Response response = performRequest(method, "/" + errorStatusCode, params); + Response response = restClient.performRequest(request); if (expectedIgnores.contains(errorStatusCode)) { //no exception gets thrown although we got an error status code, as it was configured to be ignored assertEquals(errorStatusCode, response.getStatusLine().getStatusCode()); @@ -256,14 +252,14 @@ public class RestClientSingleHostTests extends RestClientTestCase { for (String method : getHttpMethods()) { //IOExceptions should be let bubble up try { - performRequest(method, "/coe"); + restClient.performRequest(new Request(method, "/coe")); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(ConnectTimeoutException.class)); } failureListener.assertCalled(singletonList(node)); try { - performRequest(method, "/soe"); + restClient.performRequest(new Request(method, "/soe")); fail("request should have failed"); } catch(IOException e) { assertThat(e, instanceOf(SocketTimeoutException.class)); @@ -313,48 +309,6 @@ public class RestClientSingleHostTests extends RestClientTestCase { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests}. - */ - @Deprecated - public void tesPerformRequestOldStyleNullHeaders() throws IOException { - String method = randomHttpMethod(getRandom()); - int statusCode = randomStatusCode(getRandom()); - try { - performRequest(method, "/" + statusCode, (Header[])null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("request headers must not be null", e.getMessage()); - } - try { - performRequest(method, "/" + statusCode, (Header)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("request header must not be null", e.getMessage()); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. 
- */ - @Deprecated - public void testPerformRequestOldStyleWithNullParams() throws IOException { - String method = randomHttpMethod(getRandom()); - int statusCode = randomStatusCode(getRandom()); - try { - restClient.performRequest(method, "/" + statusCode, (Map)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("parameters cannot be null", e.getMessage()); - } - try { - restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null); - fail("request should have failed"); - } catch(NullPointerException e) { - assertEquals("parameters cannot be null", e.getMessage()); - } - } - /** * End to end test for request and response headers. Exercises the mock http client ability to send back * whatever headers it has received. @@ -464,35 +418,4 @@ public class RestClientSingleHostTests extends RestClientTestCase { } return expectedRequest; } - - /** - * @deprecated prefer {@link RestClient#performRequest(Request)}. - */ - @Deprecated - private Response performRequest(String method, String endpoint, Header... headers) throws IOException { - return performRequest(method, endpoint, Collections.emptyMap(), headers); - } - - /** - * @deprecated prefer {@link RestClient#performRequest(Request)}. - */ - @Deprecated - private Response performRequest(String method, String endpoint, Map params, Header... headers) throws IOException { - int methodSelector; - if (params.isEmpty()) { - methodSelector = randomIntBetween(0, 2); - } else { - methodSelector = randomIntBetween(1, 2); - } - switch(methodSelector) { - case 0: - return restClient.performRequest(method, endpoint, headers); - case 1: - return restClient.performRequest(method, endpoint, params, headers); - case 2: - return restClient.performRequest(method, endpoint, params, (HttpEntity)null, headers); - default: - throw new UnsupportedOperationException(); - } - } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index ef94b70542f..4a037b18404 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -42,7 +42,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.singletonList; -import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.hamcrest.Matchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; @@ -90,88 +89,6 @@ public class RestClientTests extends RestClientTestCase { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithUnsupportedMethod()}. 
- */ - @Deprecated - public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(UnsupportedOperationException.class)); - assertEquals("http method not supported: unsupported", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. - */ - @Deprecated - public void testPerformOldStyleAsyncWithNullParams() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("parameters cannot be null", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests}. - */ - @Deprecated - public void testPerformOldStyleAsyncWithNullHeaders() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - ResponseListener listener = new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(NullPointerException.class)); - assertEquals("header cannot be null", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }; - restClient.performRequestAsync("GET", randomAsciiLettersOfLength(5), listener, (Header) null); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - public void testPerformAsyncWithWrongEndpoint() throws Exception { final CountDownLatch latch = new CountDownLatch(1); try (RestClient restClient = createRestClient()) { @@ -195,33 +112,6 @@ public class RestClientTests extends RestClientTestCase { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithWrongEndpoint()}. 
- */ - @Deprecated - public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception { - final CountDownLatch latch = new CountDownLatch(1); - try (RestClient restClient = createRestClient()) { - restClient.performRequestAsync("GET", "::http:///", new ResponseListener() { - @Override - public void onSuccess(Response response) { - throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client"); - } - - @Override - public void onFailure(Exception exception) { - try { - assertThat(exception, instanceOf(IllegalArgumentException.class)); - assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage()); - } finally { - latch.countDown(); - } - } - }); - assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS)); - } - } - public void testBuildUriLeavesPathUntouched() { final Map emptyMap = Collections.emptyMap(); { @@ -259,34 +149,6 @@ public class RestClientTests extends RestClientTestCase { } } - @Deprecated - public void testSetHostsWrongArguments() throws IOException { - try (RestClient restClient = createRestClient()) { - restClient.setHosts((HttpHost[]) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts(); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts((HttpHost) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - try (RestClient restClient = createRestClient()) { - restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("host cannot be null", e.getMessage()); - } - } - public void testSetNodesWrongArguments() throws IOException { try (RestClient restClient = createRestClient()) { restClient.setNodes(null); @@ -348,23 +210,6 @@ public class RestClientTests extends RestClientTestCase { } } - /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}. 
- */ - @Deprecated - public void testNullPath() throws IOException { - try (RestClient restClient = createRestClient()) { - for (String method : getHttpMethods()) { - try { - restClient.performRequest(method, null); - fail("path set to null should fail!"); - } catch (NullPointerException e) { - assertEquals("endpoint cannot be null", e.getMessage()); - } - } - } - } - public void testSelectHosts() throws IOException { Node n1 = new Node(new HttpHost("1"), null, null, "1", null, null); Node n2 = new Node(new HttpHost("2"), null, null, "2", null, null); diff --git a/distribution/build.gradle b/distribution/build.gradle index 675799c5b22..317ece6bf2b 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -23,6 +23,7 @@ import org.elasticsearch.gradle.NoticeTask import org.elasticsearch.gradle.test.RunTask import org.apache.tools.ant.filters.FixCrLfFilter +import java.nio.file.Files import java.nio.file.Path Collection distributions = project('archives').subprojects + project('packages').subprojects @@ -504,4 +505,16 @@ subprojects { } return result } + + ext.assertLinesInFile = { Path path, List expectedLines -> + final List actualLines = Files.readAllLines(path) + int line = 0 + for (final String expectedLine : expectedLines) { + final String actualLine = actualLines.get(line) + if (expectedLine != actualLine) { + throw new GradleException("expected line [${line + 1}] in [${path}] to be [${expectedLine}] but was [${actualLine}]") + } + line++ + } + } } diff --git a/docs/build.gradle b/docs/build.gradle index 980c99baf83..c6a7a8d4837 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -19,6 +19,22 @@ apply plugin: 'elasticsearch.docs-test' +/* List of files that have snippets that will not work until platinum tests can occur ... */ +buildRestTests.expectedUnconvertedCandidates = [ + 'reference/ml/transforms.asciidoc', + 'reference/ml/apis/delete-calendar-event.asciidoc', + 'reference/ml/apis/get-bucket.asciidoc', + 'reference/ml/apis/get-category.asciidoc', + 'reference/ml/apis/get-influencer.asciidoc', + 'reference/ml/apis/get-job-stats.asciidoc', + 'reference/ml/apis/get-overall-buckets.asciidoc', + 'reference/ml/apis/get-record.asciidoc', + 'reference/ml/apis/get-snapshot.asciidoc', + 'reference/ml/apis/post-data.asciidoc', + 'reference/ml/apis/revert-snapshot.asciidoc', + 'reference/ml/apis/update-snapshot.asciidoc', +] + integTestCluster { /* Enable regexes in painless so our tests don't complain about example * snippets that use them. */ @@ -74,6 +90,17 @@ buildRestTests.docs = fileTree(projectDir) { exclude 'build' // Just syntax examples exclude 'README.asciidoc' + // Broken code snippet tests + exclude 'reference/rollup/rollup-getting-started.asciidoc' + exclude 'reference/rollup/apis/rollup-job-config.asciidoc' + exclude 'reference/rollup/apis/rollup-index-caps.asciidoc' + exclude 'reference/rollup/apis/put-job.asciidoc' + exclude 'reference/rollup/apis/stop-job.asciidoc' + exclude 'reference/rollup/apis/start-job.asciidoc' + exclude 'reference/rollup/apis/rollup-search.asciidoc' + exclude 'reference/rollup/apis/delete-job.asciidoc' + exclude 'reference/rollup/apis/get-job.asciidoc' + exclude 'reference/rollup/apis/rollup-caps.asciidoc' } listSnippets.docs = buildRestTests.docs @@ -594,3 +621,480 @@ buildRestTests.setups['library'] = ''' {"name": "The Moon is a Harsh Mistress", "author": "Robert A. 
Heinlein", "release_date": "1966-04-01", "page_count": 288} ''' +buildRestTests.setups['sensor_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "*/30 * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } +''' +buildRestTests.setups['sensor_started_rollup_job'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + + - do: + bulk: + index: sensor-1 + type: _doc + refresh: true + body: | + {"index":{}} + {"timestamp": 1516729294000, "temperature": 200, "voltage": 5.2, "node": "a"} + {"index":{}} + {"timestamp": 1516642894000, "temperature": 201, "voltage": 5.8, "node": "b"} + {"index":{}} + {"timestamp": 1516556494000, "temperature": 202, "voltage": 5.1, "node": "a"} + {"index":{}} + {"timestamp": 1516470094000, "temperature": 198, "voltage": 5.6, "node": "b"} + {"index":{}} + {"timestamp": 1516383694000, "temperature": 200, "voltage": 4.2, "node": "c"} + {"index":{}} + {"timestamp": 1516297294000, "temperature": 202, "voltage": 4.0, "node": "c"} + + - do: + xpack.rollup.put_job: + id: "sensor" + body: > + { + "index_pattern": "sensor-*", + "rollup_index": "sensor_rollup", + "cron": "* * * * * ?", + "page_size" :1000, + "groups" : { + "date_histogram": { + "field": "timestamp", + "interval": "1h", + "delay": "7d" + }, + "terms": { + "fields": ["node"] + } + }, + "metrics": [ + { + "field": "temperature", + "metrics": ["min", "max", "sum"] + }, + { + "field": "voltage", + "metrics": ["avg"] + } + ] + } + - do: + xpack.rollup.start_job: + id: "sensor" +''' + +buildRestTests.setups['sensor_index'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + load: + type: double + net_in: + type: long + net_out: + type: long + hostname: + type: keyword + datacenter: + type: keyword +''' + +buildRestTests.setups['sensor_prefab_data'] = ''' + - do: + indices.create: + index: sensor-1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + timestamp: + type: date + temperature: + type: long + voltage: + type: float + node: + type: keyword + - do: + indices.create: + index: sensor_rollup + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + node.terms.value: + type: keyword + temperature.sum.value: + type: double + temperature.max.value: + type: double + temperature.min.value: + type: double + timestamp.date_histogram.time_zone: + type: keyword + timestamp.date_histogram.interval: + type: keyword + timestamp.date_histogram.timestamp: + type: date + timestamp.date_histogram._count: + type: long + 
voltage.avg.value: + type: double + voltage.avg._count: + type: long + _rollup.id: + type: keyword + _rollup.version: + type: long + _meta: + _rollup: + sensor: + cron: "* * * * * ?" + rollup_index: "sensor_rollup" + index_pattern: "sensor-*" + timeout: "20s" + page_size: 1000 + groups: + date_histogram: + delay: "7d" + field: "timestamp" + interval: "1h" + time_zone: "UTC" + terms: + fields: + - "node" + id: sensor + metrics: + - field: "temperature" + metrics: + - min + - max + - sum + - field: "voltage" + metrics: + - avg + + - do: + bulk: + index: sensor_rollup + type: _doc + refresh: true + body: | + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":201.0,"temperature.max.value":201.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":201.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.800000190734863,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516640400000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"c","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516381200000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.099999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516554000000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"a","temperature.sum.value":200.0,"temperature.max.value":200.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":200.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.199999809265137,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516726800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + {"node.terms.value":"b","temperature.sum.value":198.0,"temperature.max.value":198.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":198.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":5.599999904632568,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516467600000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + {"index":{}} + 
{"node.terms.value":"c","temperature.sum.value":202.0,"temperature.max.value":202.0,"timestamp.date_histogram.time_zone":"UTC","temperature.min.value":202.0,"timestamp.date_histogram._count":1,"timestamp.date_histogram.interval":"1h","_rollup.computed":["temperature.sum","temperature.min","voltage.avg","temperature.max","node.terms","timestamp.date_histogram"],"voltage.avg.value":4.0,"node.terms._count":1,"_rollup.version":1,"timestamp.date_histogram.timestamp":1516294800000,"voltage.avg._count":1.0,"_rollup.id":"sensor"} + +''' +buildRestTests.setups['sample_job'] = ''' + - do: + xpack.ml.put_job: + job_id: "sample_job" + body: > + { + "description" : "Very basic job", + "analysis_config" : { + "bucket_span":"10m", + "detectors" :[ + { + "function": "count" + } + ]}, + "data_description" : { + "time_field":"timestamp", + "time_format": "epoch_ms" + } + } +''' +buildRestTests.setups['farequote_index'] = ''' + - do: + indices.create: + index: farequote + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + time: + type: date + responsetime: + type: float + airline: + type: keyword + doc_count: + type: integer +''' +buildRestTests.setups['farequote_data'] = buildRestTests.setups['farequote_index'] + ''' + - do: + bulk: + index: farequote + type: metric + refresh: true + body: | + {"index": {"_id":"1"}} + {"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000", "doc_count": 5} + {"index": {"_id":"2"}} + {"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000", "doc_count": 23} + {"index": {"_id":"3"}} + {"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000", "doc_count": 42} +''' +buildRestTests.setups['farequote_job'] = buildRestTests.setups['farequote_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "farequote" + body: > + { + "analysis_config": { + "bucket_span": "60m", + "detectors": [{ + "function": "mean", + "field_name": "responsetime", + "by_field_name": "airline" + }], + "summary_count_field_name": "doc_count" + }, + "data_description": { + "time_field": "time" + } + } +''' +buildRestTests.setups['farequote_datafeed'] = buildRestTests.setups['farequote_job'] + ''' + - do: + xpack.ml.put_datafeed: + datafeed_id: "datafeed-farequote" + body: > + { + "job_id":"farequote", + "indexes":"farequote" + } +''' +buildRestTests.setups['server_metrics_index'] = ''' + - do: + indices.create: + index: server-metrics + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + timestamp: + type: date + total: + type: long +''' +buildRestTests.setups['server_metrics_data'] = buildRestTests.setups['server_metrics_index'] + ''' + - do: + bulk: + index: server-metrics + type: metric + refresh: true + body: | + {"index": {"_id":"1177"}} + {"timestamp":"2017-03-23T13:00:00","total":40476} + {"index": {"_id":"1178"}} + {"timestamp":"2017-03-23T13:00:00","total":15287} + {"index": {"_id":"1179"}} + {"timestamp":"2017-03-23T13:00:00","total":-776} + {"index": {"_id":"1180"}} + {"timestamp":"2017-03-23T13:00:00","total":11366} + {"index": {"_id":"1181"}} + {"timestamp":"2017-03-23T13:00:00","total":3606} + {"index": {"_id":"1182"}} + {"timestamp":"2017-03-23T13:00:00","total":19006} + {"index": {"_id":"1183"}} + {"timestamp":"2017-03-23T13:00:00","total":38613} + {"index": {"_id":"1184"}} + {"timestamp":"2017-03-23T13:00:00","total":19516} + {"index": {"_id":"1185"}} + {"timestamp":"2017-03-23T13:00:00","total":-258} + {"index": {"_id":"1186"}} + 
{"timestamp":"2017-03-23T13:00:00","total":9551} + {"index": {"_id":"1187"}} + {"timestamp":"2017-03-23T13:00:00","total":11217} + {"index": {"_id":"1188"}} + {"timestamp":"2017-03-23T13:00:00","total":22557} + {"index": {"_id":"1189"}} + {"timestamp":"2017-03-23T13:00:00","total":40508} + {"index": {"_id":"1190"}} + {"timestamp":"2017-03-23T13:00:00","total":11887} + {"index": {"_id":"1191"}} + {"timestamp":"2017-03-23T13:00:00","total":31659} +''' +buildRestTests.setups['server_metrics_job'] = buildRestTests.setups['server_metrics_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "total-requests" + body: > + { + "description" : "Total sum of requests", + "analysis_config" : { + "bucket_span":"10m", + "detectors" :[ + { + "detector_description": "Sum of total", + "function": "sum", + "field_name": "total" + } + ]}, + "data_description" : { + "time_field":"timestamp", + "time_format": "epoch_ms" + } + } +''' +buildRestTests.setups['server_metrics_datafeed'] = buildRestTests.setups['server_metrics_job'] + ''' + - do: + xpack.ml.put_datafeed: + datafeed_id: "datafeed-total-requests" + body: > + { + "job_id":"total-requests", + "indexes":"server-metrics" + } +''' +buildRestTests.setups['server_metrics_openjob'] = buildRestTests.setups['server_metrics_datafeed'] + ''' + - do: + xpack.ml.open_job: + job_id: "total-requests" +''' +buildRestTests.setups['server_metrics_startdf'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.start_datafeed: + datafeed_id: "datafeed-total-requests" +''' +buildRestTests.setups['calendar_outages'] = ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" +''' +buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages'] + ''' + - do: + xpack.ml.post_calendar_events: + calendar_id: "planned-outages" + body: > + { "description": "event 1", "start_time": "2017-12-01T00:00:00Z", "end_time": "2017-12-02T00:00:00Z", "calendar_id": "planned-outages" } + + +''' +buildRestTests.setups['calendar_outages_openjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" +''' +buildRestTests.setups['calendar_outages_addjob'] = buildRestTests.setups['server_metrics_openjob'] + ''' + - do: + xpack.ml.put_calendar: + calendar_id: "planned-outages" + body: > + { + "job_ids": ["total-requests"] + } +''' +buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['calendar_outages_addjob'] + ''' + - do: + xpack.ml.post_calendar_events: + calendar_id: "planned-outages" + body: > + { "events" : [ + { "description": "event 1", "start_time": "1513641600000", "end_time": "1513728000000"}, + { "description": "event 2", "start_time": "1513814400000", "end_time": "1513900800000"}, + { "description": "event 3", "start_time": "1514160000000", "end_time": "1514246400000"} + ]} +''' + + diff --git a/docs/java-rest/high-level/document/update-by-query.asciidoc b/docs/java-rest/high-level/document/update-by-query.asciidoc new file mode 100644 index 00000000000..324385a442b --- /dev/null +++ b/docs/java-rest/high-level/document/update-by-query.asciidoc @@ -0,0 +1,181 @@ +[[java-rest-high-document-update-by-query]] +=== Update By Query API + +[[java-rest-high-document-update-by-query-request]] +==== Update By Query Request + +A `UpdateByQueryRequest` can be used to update documents in an index. + +It requires an existing index (or a set of indices) on which the update is to be performed. 
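+
+For orientation, the listing below is an end-to-end sketch that is hand-written rather than
+one of the tested, include-tagged snippets used in the rest of this page; the index name,
+field names, and values in it are made up for illustration, and `client` is assumed to be an
+existing `RestHighLevelClient` instance:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+UpdateByQueryRequest request = new UpdateByQueryRequest("posts"); // <1>
+request.setConflicts("proceed"); // <2>
+request.setQuery(new TermQueryBuilder("user", "kimchy")); // <3>
+request.setScript(
+    new Script(ScriptType.INLINE, "painless",
+        "ctx._source.likes++", Collections.emptyMap())); // <4>
+BulkByScrollResponse response =
+    client.updateByQuery(request, RequestOptions.DEFAULT); // <5>
+--------------------------------------------------
+<1> Update documents in the hypothetical `posts` index
+<2> Count version conflicts instead of aborting
+<3> Only update documents that match a query
+<4> Increment a field on every matching document
+<5> Execute synchronously; each of these steps is covered in detail below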
+
+The simplest form of an `UpdateByQueryRequest` looks like this:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request]
+--------------------------------------------------
+<1> Creates the `UpdateByQueryRequest` on a set of indices.
+
+By default version conflicts abort the `UpdateByQueryRequest` process, but you can count them instead by setting the
+conflicts mode to `proceed` in the request body:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-conflicts]
+--------------------------------------------------
+<1> Set `proceed` on version conflict
+
+You can limit the documents to update by adding a type to the source or by adding a query.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-typeOrQuery]
+--------------------------------------------------
+<1> Only update documents of the `doc` type
+<2> Only update documents which have the field `user` set to `kimchy`
+
+It’s also possible to limit the number of processed documents by setting `size`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-size]
+--------------------------------------------------
+<1> Only update 10 documents
+
+By default `UpdateByQueryRequest` uses batches of 1000. You can change the batch size with `setBatchSize`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scrollSize]
+--------------------------------------------------
+<1> Use batches of 100 documents
+
+Update by query can also use the ingest feature by specifying a `pipeline`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-pipeline]
+--------------------------------------------------
+<1> Set the pipeline to `my_pipeline`
+
+`UpdateByQueryRequest` also supports a `script` that modifies the document. The following example illustrates this:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-script]
+--------------------------------------------------
+<1> `setScript` to increment the `likes` field on all documents with user `kimchy`
+
+`UpdateByQueryRequest` can also be parallelized automatically, using `sliced-scroll` to
+slice on `_uid`. Use `setSlices` to specify the number of slices to use:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-slices]
+--------------------------------------------------
+<1> Set the number of slices to use
+
+`UpdateByQueryRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-scroll] +-------------------------------------------------- +<1> set scroll time + +If you provide routing then the routing is copied to the scroll query, limiting the process to the shards that match +that routing value. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-routing] +-------------------------------------------------- +<1> set routing + + +==== Optional arguments +In addition to the options above the following arguments can optionally be also provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the update by query request to be performed as a `TimeValue` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-refresh] +-------------------------------------------------- +<1> Refresh index after calling update by query + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-request-indicesOptions] +-------------------------------------------------- +<1> Set indices options + + +[[java-rest-high-document-update-by-query-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute] +-------------------------------------------------- + +[[java-rest-high-document-update-by-query-async]] +==== Asynchronous Execution + +The asynchronous execution of an update by query request requires both the `UpdateByQueryRequest` +instance and an `ActionListener` instance to be passed to the asynchronous +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-async] +-------------------------------------------------- +<1> The `UpdateByQueryRequest` to execute and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `BulkByScrollResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument and contains a list of individual results for each +operation that was executed. Note that one or more operations might have +failed while the others have been successfully executed. +<2> Called when the whole `UpdateByQueryRequest` fails. 
+
+[[java-rest-high-document-update-by-query-execute-listener-response]]
+==== Update By Query Response
+
+The returned `BulkByScrollResponse` contains information about the executed operations and
+allows you to iterate over each result as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/CRUDDocumentationIT.java[update-by-query-response]
+--------------------------------------------------
+<1> Get the total time taken
+<2> Check if the request timed out
+<3> Get the total number of docs processed
+<4> Number of docs that were updated
+<5> Number of docs that were deleted
+<6> Number of batches that were executed
+<7> Number of skipped docs
+<8> Number of version conflicts
+<9> Number of times the request had to retry bulk index operations
+<10> Number of times the request had to retry search operations
+<11> The total time this request has throttled itself, not including the current throttle time if it is currently sleeping
+<12> Remaining delay of any current throttle sleep, or 0 if not sleeping
+<13> Failures during the search phase
+<14> Failures during the bulk index operation
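+
+As an untested illustration of reading those values, the getter names below mirror the
+callout list above and are assumptions rather than verified signatures (`response` is
+the `BulkByScrollResponse` from the execution step):
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.common.unit.TimeValue;
+
+TimeValue took = response.getTook();          // total time taken
+boolean timedOut = response.isTimedOut();     // did the request time out?
+long total = response.getTotal();             // docs processed
+long updated = response.getUpdated();         // docs updated
+long deleted = response.getDeleted();         // docs deleted
+long batches = response.getBatches();         // batches executed
+long noops = response.getNoops();             // docs skipped
+long versionConflicts = response.getVersionConflicts();
+long bulkRetries = response.getBulkRetries();     // bulk index retries
+long searchRetries = response.getSearchRetries(); // search retries
+--------------------------------------------------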
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-ml-flush-job-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-execute-async] +-------------------------------------------------- +<1> The `FlushJobRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion. A typical `ActionListener` for `FlushJobResponse` may +look like + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-x-pack-ml-flush-job-response]] +==== Flush Job Response + +A `FlushJobResponse` contains an acknowledgement and an optional end date for the +last finalized bucket + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-flush-job-response] +-------------------------------------------------- +<1> `isFlushed()` indicates if the job was successfully flushed or not. +<2> `getLastFinalizedBucketEnd()` provides the timestamp +(in milliseconds-since-the-epoch) of the end of the last bucket that was processed. \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/get-job-stats.asciidoc b/docs/java-rest/high-level/ml/get-job-stats.asciidoc new file mode 100644 index 00000000000..90f7794ae76 --- /dev/null +++ b/docs/java-rest/high-level/ml/get-job-stats.asciidoc @@ -0,0 +1,67 @@ +[[java-rest-high-x-pack-ml-get-job-stats]] +=== Get Job Stats API + +The Get Job Stats API provides the ability to get any number of + {ml} job's statistics in the cluster. +It accepts a `GetJobStatsRequest` object and responds +with a `GetJobStatsResponse` object. + +[[java-rest-high-x-pack-ml-get-job-stats-request]] +==== Get Job Stats Request + +A `GetJobsStatsRequest` object can have any number of `jobId` +entries. However, they all must be non-null. An empty list is the same as +requesting statistics for all jobs. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-request] +-------------------------------------------------- +<1> Constructing a new request referencing existing `jobIds`, can contain wildcards +<2> Whether to ignore if a wildcard expression matches no jobs. + (This includes `_all` string or when no jobs have been specified) + +[[java-rest-high-x-pack-ml-get-job-stats-execution]] +==== Execution + +The request can be executed through the `MachineLearningClient` contained +in the `RestHighLevelClient` object, accessed via the `machineLearningClient()` method. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-ml-get-job-stats-execution-async]] +==== Asynchronous Execution + +The request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-execute-async] +-------------------------------------------------- +<1> The `GetJobsStatsRequest` to execute and the `ActionListener` to use when +the execution completes + +The method does not block and returns immediately. The passed `ActionListener` is used +to notify the caller of completion. A typical `ActionListener` for `GetJobsStatsResponse` may +look like + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-listener] +-------------------------------------------------- +<1> `onResponse` is called back when the action is completed successfully +<2> `onFailure` is called back when some unexpected error occurs + +[[java-rest-high-x-pack-ml-get-job-stats-response]] +==== Get Job Stats Response +The returned `GetJobStatsResponse` contains the requested job statistics: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MlClientDocumentationIT.java[x-pack-ml-get-job-stats-response] +-------------------------------------------------- +<1> `getCount()` indicates the number of jobs statistics found +<2> `getJobStats()` is the collection of {ml} `JobStats` objects found \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 2b72ca74f6a..b791dbc0f8c 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -16,6 +16,7 @@ Multi-document APIs:: * <> * <> * <> +* <> include::document/index.asciidoc[] include::document/get.asciidoc[] @@ -25,6 +26,7 @@ include::document/update.asciidoc[] include::document/bulk.asciidoc[] include::document/multi-get.asciidoc[] include::document/reindex.asciidoc[] +include::document/update-by-query.asciidoc[] == Search APIs @@ -211,6 +213,8 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <> * <> * <> +* <> +* <> * <> * <> @@ -219,6 +223,8 @@ include::ml/get-job.asciidoc[] include::ml/delete-job.asciidoc[] include::ml/open-job.asciidoc[] include::ml/close-job.asciidoc[] +include::ml/flush-job.asciidoc[] +include::ml/get-job-stats.asciidoc[] include::ml/get-buckets.asciidoc[] include::ml/get-records.asciidoc[] diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc index 9528b7829e3..d1ea1fad885 100644 --- a/docs/reference/cat/thread_pool.asciidoc +++ b/docs/reference/cat/thread_pool.asciidoc @@ -15,13 +15,14 @@ Which looks like: [source,txt] -------------------------------------------------- node-0 analyze 0 0 0 +... node-0 fetch_shard_started 0 0 0 node-0 fetch_shard_store 0 0 0 node-0 flush 0 0 0 ... 
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 2b72ca74f6a..b791dbc0f8c 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -16,6 +16,7 @@ Multi-document APIs::
 * <<java-rest-high-document-bulk>>
 * <<java-rest-high-document-multi-get>>
 * <<java-rest-high-document-reindex>>
+* <<java-rest-high-document-update-by-query>>
 
 include::document/index.asciidoc[]
 include::document/get.asciidoc[]
@@ -25,6 +26,7 @@ include::document/update.asciidoc[]
 include::document/bulk.asciidoc[]
 include::document/multi-get.asciidoc[]
 include::document/reindex.asciidoc[]
+include::document/update-by-query.asciidoc[]
 
 == Search APIs
 
@@ -211,6 +213,8 @@ The Java High Level REST Client supports the following Machine Learning APIs:
 * <<java-rest-high-x-pack-ml-delete-job>>
 * <<java-rest-high-x-pack-ml-open-job>>
 * <<java-rest-high-x-pack-ml-close-job>>
+* <<java-rest-high-x-pack-ml-flush-job>>
+* <<java-rest-high-x-pack-ml-get-job-stats>>
 * <<java-rest-high-x-pack-ml-get-buckets>>
 * <<java-rest-high-x-pack-ml-get-records>>
 
@@ -219,6 +223,8 @@ include::ml/get-job.asciidoc[]
 include::ml/delete-job.asciidoc[]
 include::ml/open-job.asciidoc[]
 include::ml/close-job.asciidoc[]
+include::ml/flush-job.asciidoc[]
+include::ml/get-job-stats.asciidoc[]
 include::ml/get-buckets.asciidoc[]
 include::ml/get-records.asciidoc[]
 
diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc
index 9528b7829e3..d1ea1fad885 100644
--- a/docs/reference/cat/thread_pool.asciidoc
+++ b/docs/reference/cat/thread_pool.asciidoc
@@ -15,13 +15,14 @@ Which looks like:
 [source,txt]
 --------------------------------------------------
 node-0 analyze             0 0 0
+...
 node-0 fetch_shard_started 0 0 0
 node-0 fetch_shard_store   0 0 0
 node-0 flush               0 0 0
 ...
 node-0 write               0 0 0
 --------------------------------------------------
-// TESTRESPONSE[s/\.\.\./(node-0 .+ 0 0 0\n)+/]
+// TESTRESPONSE[s/\.\.\./(node-0 \\S+ 0 0 0\n)*/]
 // TESTRESPONSE[s/\d+/\\d+/ _cat]
 // The substitutions do two things:
 // 1. Expect any number of extra thread pools. This allows us to only list a
@@ -45,6 +46,7 @@ The second column is the thread pool name
 --------------------------------------------------
 name
 analyze
+ccr (default distro only)
 fetch_shard_started
 fetch_shard_store
 flush
@@ -81,6 +83,7 @@ active queue rejected
      0     0        0
      0     0        0
      0     0        0
+     0     0        0
      1     0        0
      0     0        0
      0     0        0
diff --git a/docs/reference/commands/index.asciidoc b/docs/reference/commands/index.asciidoc
index 164d2fc0e84..134ac1edbd0 100644
--- a/docs/reference/commands/index.asciidoc
+++ b/docs/reference/commands/index.asciidoc
@@ -1,11 +1,11 @@
-[role="xpack"]
-[[xpack-commands]]
-= {xpack} Commands
+[[commands]]
+= Command line tools
 
 [partintro]
 --
-{xpack} includes commands that help you configure security:
+{es} provides the following tools for configuring security and performing other
+tasks from the command line:
 
 * <>
 * <>
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc
index 54c0c1c1b15..53de67e55fd 100644
--- a/docs/reference/index-modules.asciidoc
+++ b/docs/reference/index-modules.asciidoc
@@ -63,12 +63,6 @@ corruption is detected, it will prevent the shard from being opened. Accepts:
 Check for both physical and logical corruption. This is much more expensive
 in terms of CPU and memory usage.
 
-`fix`::
-
-  Check for both physical and logical corruption. Segments that were reported
-  as corrupted will be automatically removed. This option *may result in data loss*.
-  Use with extreme caution!
-
 WARNING: Expert only. Checking shards may take a lot of time on large indices.
 --
diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc
index dfe2c46f725..565037c8e9a 100644
--- a/docs/reference/index.asciidoc
+++ b/docs/reference/index.asciidoc
@@ -63,7 +63,7 @@ include::sql/index.asciidoc[]
 
 include::monitoring/index.asciidoc[]
 
-include::{xes-repo-dir}/rollup/index.asciidoc[]
+include::rollup/index.asciidoc[]
 
 include::rest-api/index.asciidoc[]
diff --git a/docs/reference/migration/migrate_7_0.asciidoc b/docs/reference/migration/migrate_7_0.asciidoc
index 42fd6b7afbe..924a6984dc0 100644
--- a/docs/reference/migration/migrate_7_0.asciidoc
+++ b/docs/reference/migration/migrate_7_0.asciidoc
@@ -39,6 +39,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
 * <<breaking_70_scripting_changes>>
 * <<breaking_70_snapshotstats_changes>>
 * <<breaking_70_restclient_changes>>
+* <<breaking_70_low_level_restclient_changes>>
 
 include::migrate_7_0/aggregations.asciidoc[]
 include::migrate_7_0/analysis.asciidoc[]
@@ -53,4 +54,5 @@ include::migrate_7_0/java.asciidoc[]
 include::migrate_7_0/settings.asciidoc[]
 include::migrate_7_0/scripting.asciidoc[]
 include::migrate_7_0/snapshotstats.asciidoc[]
-include::migrate_7_0/restclient.asciidoc[]
\ No newline at end of file
+include::migrate_7_0/restclient.asciidoc[]
+include::migrate_7_0/low_level_restclient.asciidoc[]
diff --git a/docs/reference/migration/migrate_7_0/indices.asciidoc b/docs/reference/migration/migrate_7_0/indices.asciidoc
index bab7b602220..a47cc6f4324 100644
--- a/docs/reference/migration/migrate_7_0/indices.asciidoc
+++ b/docs/reference/migration/migrate_7_0/indices.asciidoc
@@ -78,3 +78,7 @@ The parent circuit breaker defines a new setting `indices.breaker.total.use_real
 heap memory instead of only considering the reserved memory by child circuit breakers. When this
 setting is `true`, the default parent breaker limit also changes from 70% to 95% of the JVM heap size.
 The previous behavior can be restored by setting `indices.breaker.total.use_real_memory` to `false`.
+
+==== `fix` value for `index.shard.check_on_startup` is removed
+
+Deprecated option value `fix` for setting `index.shard.check_on_startup` is no longer supported.
\ No newline at end of file
diff --git a/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc
new file mode 100644
index 00000000000..77f5266763f
--- /dev/null
+++ b/docs/reference/migration/migrate_7_0/low_level_restclient.asciidoc
@@ -0,0 +1,14 @@
+[[breaking_70_low_level_restclient_changes]]
+=== Low-level REST client changes
+
+==== Deprecated flavors of performRequest have been removed
+
+We deprecated the flavors of `performRequest` and `performRequestAsync` that
+do not take `Request` objects in 6.4.0 in favor of the flavors that take
+`Request` objects, because those methods can be extended without breaking
+backwards compatibility. A sketch of the `Request`-object flavor is shown at
+the end of this page.
+
+==== Removed setHosts
+
+We deprecated `setHosts` in 6.4.0 in favor of `setNodes` because it supports
+host metadata used by the `NodeSelector`.
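+
+For reference, a minimal sketch of the `Request`-object flavor that replaces the
+removed methods (the host and endpoint are placeholders):
+
+[source,java]
+--------------------------------------------------
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+RestClient restClient = RestClient.builder(
+    new HttpHost("localhost", 9200, "http")).build();
+
+// Build the request up front, then hand the single Request object over.
+Request request = new Request("GET", "/_cluster/health");
+request.addParameter("pretty", "true");   // query-string parameter
+Response response = restClient.performRequest(request);
+--------------------------------------------------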
diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/docs/reference/ml/aggregations.asciidoc
similarity index 99%
rename from x-pack/docs/en/ml/aggregations.asciidoc
rename to docs/reference/ml/aggregations.asciidoc
index 07f46501569..4b873ea790b 100644
--- a/x-pack/docs/en/ml/aggregations.asciidoc
+++ b/docs/reference/ml/aggregations.asciidoc
@@ -41,7 +41,7 @@ PUT _xpack/ml/anomaly_detectors/farequote
 }
 ----------------------------------
 // CONSOLE
-// TEST[setup:farequote_data]
+// TEST[skip:setup:farequote_data]
 
 In this example, the `airline`, `responsetime`, and `time` fields are
 aggregations.
@@ -90,7 +90,7 @@ PUT _xpack/ml/datafeeds/datafeed-farequote
 }
 ----------------------------------
 // CONSOLE
-// TEST[setup:farequote_job]
+// TEST[skip:setup:farequote_job]
 
 In this example, the aggregations have names that match the fields that they
 operate on.
That is to say, the `max` aggregation is named `time` and its diff --git a/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc b/docs/reference/ml/apis/calendarresource.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/calendarresource.asciidoc rename to docs/reference/ml/apis/calendarresource.asciidoc index 8edb43ed7a3..4279102cd35 100644 --- a/x-pack/docs/en/rest-api/ml/calendarresource.asciidoc +++ b/docs/reference/ml/apis/calendarresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-calendar-resource]] === Calendar Resources diff --git a/x-pack/docs/en/rest-api/ml/close-job.asciidoc b/docs/reference/ml/apis/close-job.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/close-job.asciidoc rename to docs/reference/ml/apis/close-job.asciidoc index 8e7e8eb0ce8..6dec6402c87 100644 --- a/x-pack/docs/en/rest-api/ml/close-job.asciidoc +++ b/docs/reference/ml/apis/close-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-close-job]] === Close Jobs API ++++ @@ -80,7 +81,7 @@ The following example closes the `total-requests` job: POST _xpack/ml/anomaly_detectors/total-requests/_close -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the job is closed, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc rename to docs/reference/ml/apis/datafeedresource.asciidoc index 0ffeb6bc89d..6fe0b35d951 100644 --- a/x-pack/docs/en/rest-api/ml/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-datafeed-resource]] === {dfeed-cap} Resources diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc b/docs/reference/ml/apis/delete-calendar-event.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc rename to docs/reference/ml/apis/delete-calendar-event.asciidoc index ef8dad39dba..8961726f573 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar-event.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar-event]] === Delete Events from Calendar API ++++ @@ -44,7 +45,7 @@ calendar: DELETE _xpack/ml/calendars/planned-outages/events/LS8LJGEBMTCMA-qz49st -------------------------------------------------- // CONSOLE -// TEST[catch:missing] +// TEST[skip:catch:missing] When the event is removed, you receive the following results: [source,js] @@ -53,4 +54,3 @@ When the event is removed, you receive the following results: "acknowledged": true } ---- -// NOTCONSOLE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc b/docs/reference/ml/apis/delete-calendar-job.asciidoc similarity index 93% rename from x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc rename to docs/reference/ml/apis/delete-calendar-job.asciidoc index 94388c0c4b6..4362a82b5cb 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar-job.asciidoc +++ b/docs/reference/ml/apis/delete-calendar-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar-job]] === Delete Jobs from Calendar API ++++ @@ -38,7 +39,7 @@ calendar and `total-requests` job: DELETE 
_xpack/ml/calendars/planned-outages/jobs/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] When the job is removed from the calendar, you receive the following results: @@ -50,4 +51,4 @@ results: "job_ids": [] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc b/docs/reference/ml/apis/delete-calendar.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc rename to docs/reference/ml/apis/delete-calendar.asciidoc index f7673b54574..9f9f3457f24 100644 --- a/x-pack/docs/en/rest-api/ml/delete-calendar.asciidoc +++ b/docs/reference/ml/apis/delete-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-calendar]] === Delete Calendar API ++++ @@ -40,7 +41,7 @@ The following example deletes the `planned-outages` calendar: DELETE _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages] +// TEST[skip:setup:calendar_outages] When the calendar is deleted, you receive the following results: [source,js] @@ -49,4 +50,4 @@ When the calendar is deleted, you receive the following results: "acknowledged": true } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc b/docs/reference/ml/apis/delete-datafeed.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc rename to docs/reference/ml/apis/delete-datafeed.asciidoc index db4fd5c177a..996d2c7dd2e 100644 --- a/x-pack/docs/en/rest-api/ml/delete-datafeed.asciidoc +++ b/docs/reference/ml/apis/delete-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-datafeed]] === Delete {dfeeds-cap} API ++++ @@ -47,7 +48,7 @@ The following example deletes the `datafeed-total-requests` {dfeed}: DELETE _xpack/ml/datafeeds/datafeed-total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_datafeed] +// TEST[skip:setup:server_metrics_datafeed] When the {dfeed} is deleted, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc b/docs/reference/ml/apis/delete-filter.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/delete-filter.asciidoc rename to docs/reference/ml/apis/delete-filter.asciidoc index b58d2980b88..21e35b66076 100644 --- a/x-pack/docs/en/rest-api/ml/delete-filter.asciidoc +++ b/docs/reference/ml/apis/delete-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-filter]] === Delete Filter API ++++ @@ -41,7 +42,7 @@ The following example deletes the `safe_domains` filter: DELETE _xpack/ml/filters/safe_domains -------------------------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] When the filter is deleted, you receive the following results: [source,js] @@ -50,4 +51,4 @@ When the filter is deleted, you receive the following results: "acknowledged": true } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/delete-job.asciidoc b/docs/reference/ml/apis/delete-job.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/delete-job.asciidoc rename to docs/reference/ml/apis/delete-job.asciidoc index c01b08545b6..d5ef120ad04 100644 --- a/x-pack/docs/en/rest-api/ml/delete-job.asciidoc +++ 
b/docs/reference/ml/apis/delete-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-job]] === Delete Jobs API ++++ @@ -56,7 +57,7 @@ The following example deletes the `total-requests` job: DELETE _xpack/ml/anomaly_detectors/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job is deleted, you receive the following results: [source,js] @@ -65,4 +66,4 @@ When the job is deleted, you receive the following results: "acknowledged": true } ---- -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc b/docs/reference/ml/apis/delete-snapshot.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc rename to docs/reference/ml/apis/delete-snapshot.asciidoc index 2ab0116fe74..96a35900545 100644 --- a/x-pack/docs/en/rest-api/ml/delete-snapshot.asciidoc +++ b/docs/reference/ml/apis/delete-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-delete-snapshot]] === Delete Model Snapshots API ++++ @@ -32,7 +33,6 @@ the `model_snapshot_id` in the results from the get jobs API. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. ==== Examples @@ -53,3 +53,4 @@ When the snapshot is deleted, you receive the following results: "acknowledged": true } ---- +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/eventresource.asciidoc b/docs/reference/ml/apis/eventresource.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/eventresource.asciidoc rename to docs/reference/ml/apis/eventresource.asciidoc index c9ab7896421..a1e96f5c25a 100644 --- a/x-pack/docs/en/rest-api/ml/eventresource.asciidoc +++ b/docs/reference/ml/apis/eventresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-event-resource]] === Scheduled Event Resources diff --git a/x-pack/docs/en/rest-api/ml/filterresource.asciidoc b/docs/reference/ml/apis/filterresource.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/filterresource.asciidoc rename to docs/reference/ml/apis/filterresource.asciidoc index e942447c1ee..e67c92dc8d0 100644 --- a/x-pack/docs/en/rest-api/ml/filterresource.asciidoc +++ b/docs/reference/ml/apis/filterresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-filter-resource]] === Filter Resources diff --git a/x-pack/docs/en/rest-api/ml/flush-job.asciidoc b/docs/reference/ml/apis/flush-job.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/flush-job.asciidoc rename to docs/reference/ml/apis/flush-job.asciidoc index 934a2d81b17..f19d2aa648f 100644 --- a/x-pack/docs/en/rest-api/ml/flush-job.asciidoc +++ b/docs/reference/ml/apis/flush-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-flush-job]] === Flush Jobs API ++++ @@ -74,7 +75,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_flush } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the operation succeeds, you receive the following results: [source,js] @@ -84,7 +85,7 @@ When the operation succeeds, you receive the following results: "last_finalized_bucket_end": 1455234900000 } ---- -// TESTRESPONSE[s/"last_finalized_bucket_end": 
1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/] +//TESTRESPONSE[s/"last_finalized_bucket_end": 1455234900000/"last_finalized_bucket_end": $body.last_finalized_bucket_end/] The `last_finalized_bucket_end` provides the timestamp (in milliseconds-since-the-epoch) of the end of the last bucket that was processed. @@ -101,7 +102,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_flush } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the operation succeeds, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/forecast.asciidoc b/docs/reference/ml/apis/forecast.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/forecast.asciidoc rename to docs/reference/ml/apis/forecast.asciidoc index 99647ecae1b..197876f3f04 100644 --- a/x-pack/docs/en/rest-api/ml/forecast.asciidoc +++ b/docs/reference/ml/apis/forecast.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-forecast]] === Forecast Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc b/docs/reference/ml/apis/get-bucket.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/get-bucket.asciidoc rename to docs/reference/ml/apis/get-bucket.asciidoc index 95b05ff7f5d..3a276c13e89 100644 --- a/x-pack/docs/en/rest-api/ml/get-bucket.asciidoc +++ b/docs/reference/ml/apis/get-bucket.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-bucket]] === Get Buckets API ++++ @@ -81,7 +82,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. 
==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc b/docs/reference/ml/apis/get-calendar-event.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc rename to docs/reference/ml/apis/get-calendar-event.asciidoc index e89173c3382..43dd74e47c9 100644 --- a/x-pack/docs/en/rest-api/ml/get-calendar-event.asciidoc +++ b/docs/reference/ml/apis/get-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-calendar-event]] === Get Scheduled Events API ++++ @@ -66,7 +67,7 @@ The following example gets information about the scheduled events in the GET _xpack/ml/calendars/planned-outages/events -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addevent] +// TEST[skip:setup:calendar_outages_addevent] The API returns the following results: diff --git a/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc b/docs/reference/ml/apis/get-calendar.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/get-calendar.asciidoc rename to docs/reference/ml/apis/get-calendar.asciidoc index ae95fd99688..f86875f326c 100644 --- a/x-pack/docs/en/rest-api/ml/get-calendar.asciidoc +++ b/docs/reference/ml/apis/get-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-calendar]] === Get Calendars API ++++ @@ -62,7 +63,7 @@ calendar: GET _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] The API returns the following results: [source,js] @@ -79,4 +80,4 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/get-category.asciidoc b/docs/reference/ml/apis/get-category.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-category.asciidoc rename to docs/reference/ml/apis/get-category.asciidoc index 13f274133c0..e5d6fe16802 100644 --- a/x-pack/docs/en/rest-api/ml/get-category.asciidoc +++ b/docs/reference/ml/apis/get-category.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-category]] === Get Categories API ++++ @@ -18,7 +19,6 @@ Retrieves job results for one or more categories. For more information about categories, see {xpack-ref}/ml-configuring-categories.html[Categorizing Log Messages]. -//<>. ==== Path Parameters @@ -56,7 +56,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. 
==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc b/docs/reference/ml/apis/get-datafeed-stats.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc rename to docs/reference/ml/apis/get-datafeed-stats.asciidoc index 2869e8222f8..9ca67cc17fb 100644 --- a/x-pack/docs/en/rest-api/ml/get-datafeed-stats.asciidoc +++ b/docs/reference/ml/apis/get-datafeed-stats.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-datafeed-stats]] === Get {dfeed-cap} Statistics API ++++ @@ -66,7 +67,7 @@ The following example gets usage information for the GET _xpack/ml/datafeeds/datafeed-total-requests/_stats -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] The API returns the following results: [source,js] @@ -97,4 +98,4 @@ The API returns the following results: // TESTRESPONSE[s/"node-0"/$body.$_path/] // TESTRESPONSE[s/"hoXMLZB0RWKfR9UPPUCxXX"/$body.$_path/] // TESTRESPONSE[s/"127.0.0.1:9300"/$body.$_path/] -// TESTRESPONSE[s/"17179869184"/$body.datafeeds.0.node.attributes.ml\\.machine_memory/] +// TESTRESPONSE[s/"17179869184"/$body.datafeeds.0.node.attributes.ml\\.machine_memory/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc b/docs/reference/ml/apis/get-datafeed.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc rename to docs/reference/ml/apis/get-datafeed.asciidoc index 0fa51773fd1..db5f4249669 100644 --- a/x-pack/docs/en/rest-api/ml/get-datafeed.asciidoc +++ b/docs/reference/ml/apis/get-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-datafeed]] === Get {dfeeds-cap} API ++++ @@ -60,7 +61,7 @@ The following example gets configuration information for the GET _xpack/ml/datafeeds/datafeed-total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_datafeed] +// TEST[skip:setup:server_metrics_datafeed] The API returns the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/get-filter.asciidoc b/docs/reference/ml/apis/get-filter.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/get-filter.asciidoc rename to docs/reference/ml/apis/get-filter.asciidoc index b4699e9d622..2dbb5d16cc5 100644 --- a/x-pack/docs/en/rest-api/ml/get-filter.asciidoc +++ b/docs/reference/ml/apis/get-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-filter]] === Get Filters API ++++ @@ -62,7 +63,7 @@ filter: GET _xpack/ml/filters/safe_domains -------------------------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] The API returns the following results: [source,js] @@ -81,4 +82,4 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc b/docs/reference/ml/apis/get-influencer.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-influencer.asciidoc rename to docs/reference/ml/apis/get-influencer.asciidoc index bffd2b8e096..182cca7aa99 100644 --- a/x-pack/docs/en/rest-api/ml/get-influencer.asciidoc +++ b/docs/reference/ml/apis/get-influencer.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-influencer]] === Get Influencers API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc 
b/docs/reference/ml/apis/get-job-stats.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc rename to docs/reference/ml/apis/get-job-stats.asciidoc index bd59ee8b258..509d9448a69 100644 --- a/x-pack/docs/en/rest-api/ml/get-job-stats.asciidoc +++ b/docs/reference/ml/apis/get-job-stats.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-job-stats]] === Get Job Statistics API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-job.asciidoc b/docs/reference/ml/apis/get-job.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/get-job.asciidoc rename to docs/reference/ml/apis/get-job.asciidoc index 2e95d8e01bb..c669ac6034e 100644 --- a/x-pack/docs/en/rest-api/ml/get-job.asciidoc +++ b/docs/reference/ml/apis/get-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-job]] === Get Jobs API ++++ @@ -59,7 +60,7 @@ The following example gets configuration information for the `total-requests` jo GET _xpack/ml/anomaly_detectors/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] The API returns the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc b/docs/reference/ml/apis/get-overall-buckets.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc rename to docs/reference/ml/apis/get-overall-buckets.asciidoc index f2581f4904e..f4818f3bbbe 100644 --- a/x-pack/docs/en/rest-api/ml/get-overall-buckets.asciidoc +++ b/docs/reference/ml/apis/get-overall-buckets.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-overall-buckets]] === Get Overall Buckets API ++++ @@ -93,7 +94,6 @@ that stores the results. The `machine_learning_admin` and `machine_learning_user roles provide these privileges. For more information, see {xpack-ref}/security-privileges.html[Security Privileges] and {xpack-ref}/built-in-roles.html[Built-in Roles]. -//<> and <>. 
==== Examples diff --git a/x-pack/docs/en/rest-api/ml/get-record.asciidoc b/docs/reference/ml/apis/get-record.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-record.asciidoc rename to docs/reference/ml/apis/get-record.asciidoc index 1870b441597..199cce15484 100644 --- a/x-pack/docs/en/rest-api/ml/get-record.asciidoc +++ b/docs/reference/ml/apis/get-record.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-record]] === Get Records API ++++ diff --git a/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc b/docs/reference/ml/apis/get-snapshot.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc rename to docs/reference/ml/apis/get-snapshot.asciidoc index 24e82af1f19..e194d944b63 100644 --- a/x-pack/docs/en/rest-api/ml/get-snapshot.asciidoc +++ b/docs/reference/ml/apis/get-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-get-snapshot]] === Get Model Snapshots API ++++ diff --git a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc b/docs/reference/ml/apis/jobcounts.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/jobcounts.asciidoc rename to docs/reference/ml/apis/jobcounts.asciidoc index d343cc23ae0..d0169e228d5 100644 --- a/x-pack/docs/en/rest-api/ml/jobcounts.asciidoc +++ b/docs/reference/ml/apis/jobcounts.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-jobstats]] === Job Statistics diff --git a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc b/docs/reference/ml/apis/jobresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/jobresource.asciidoc rename to docs/reference/ml/apis/jobresource.asciidoc index 5b109b1c21d..e0c314724e7 100644 --- a/x-pack/docs/en/rest-api/ml/jobresource.asciidoc +++ b/docs/reference/ml/apis/jobresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-job-resource]] === Job Resources diff --git a/x-pack/docs/en/rest-api/ml-api.asciidoc b/docs/reference/ml/apis/ml-api.asciidoc similarity index 61% rename from x-pack/docs/en/rest-api/ml-api.asciidoc rename to docs/reference/ml/apis/ml-api.asciidoc index b48e9f93404..b8509f22152 100644 --- a/x-pack/docs/en/rest-api/ml-api.asciidoc +++ b/docs/reference/ml/apis/ml-api.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-apis]] == Machine Learning APIs @@ -70,57 +71,57 @@ machine learning APIs and in advanced job configuration options in Kibana. 
* <> //ADD -include::ml/post-calendar-event.asciidoc[] -include::ml/put-calendar-job.asciidoc[] +include::post-calendar-event.asciidoc[] +include::put-calendar-job.asciidoc[] //CLOSE -include::ml/close-job.asciidoc[] +include::close-job.asciidoc[] //CREATE -include::ml/put-calendar.asciidoc[] -include::ml/put-datafeed.asciidoc[] -include::ml/put-filter.asciidoc[] -include::ml/put-job.asciidoc[] +include::put-calendar.asciidoc[] +include::put-datafeed.asciidoc[] +include::put-filter.asciidoc[] +include::put-job.asciidoc[] //DELETE -include::ml/delete-calendar.asciidoc[] -include::ml/delete-datafeed.asciidoc[] -include::ml/delete-calendar-event.asciidoc[] -include::ml/delete-filter.asciidoc[] -include::ml/delete-job.asciidoc[] -include::ml/delete-calendar-job.asciidoc[] -include::ml/delete-snapshot.asciidoc[] +include::delete-calendar.asciidoc[] +include::delete-datafeed.asciidoc[] +include::delete-calendar-event.asciidoc[] +include::delete-filter.asciidoc[] +include::delete-job.asciidoc[] +include::delete-calendar-job.asciidoc[] +include::delete-snapshot.asciidoc[] //FLUSH -include::ml/flush-job.asciidoc[] +include::flush-job.asciidoc[] //FORECAST -include::ml/forecast.asciidoc[] +include::forecast.asciidoc[] //GET -include::ml/get-calendar.asciidoc[] -include::ml/get-bucket.asciidoc[] -include::ml/get-overall-buckets.asciidoc[] -include::ml/get-category.asciidoc[] -include::ml/get-datafeed.asciidoc[] -include::ml/get-datafeed-stats.asciidoc[] -include::ml/get-influencer.asciidoc[] -include::ml/get-job.asciidoc[] -include::ml/get-job-stats.asciidoc[] -include::ml/get-snapshot.asciidoc[] -include::ml/get-calendar-event.asciidoc[] -include::ml/get-filter.asciidoc[] -include::ml/get-record.asciidoc[] +include::get-calendar.asciidoc[] +include::get-bucket.asciidoc[] +include::get-overall-buckets.asciidoc[] +include::get-category.asciidoc[] +include::get-datafeed.asciidoc[] +include::get-datafeed-stats.asciidoc[] +include::get-influencer.asciidoc[] +include::get-job.asciidoc[] +include::get-job-stats.asciidoc[] +include::get-snapshot.asciidoc[] +include::get-calendar-event.asciidoc[] +include::get-filter.asciidoc[] +include::get-record.asciidoc[] //OPEN -include::ml/open-job.asciidoc[] +include::open-job.asciidoc[] //POST -include::ml/post-data.asciidoc[] +include::post-data.asciidoc[] //PREVIEW -include::ml/preview-datafeed.asciidoc[] +include::preview-datafeed.asciidoc[] //REVERT -include::ml/revert-snapshot.asciidoc[] +include::revert-snapshot.asciidoc[] //START/STOP -include::ml/start-datafeed.asciidoc[] -include::ml/stop-datafeed.asciidoc[] +include::start-datafeed.asciidoc[] +include::stop-datafeed.asciidoc[] //UPDATE -include::ml/update-datafeed.asciidoc[] -include::ml/update-filter.asciidoc[] -include::ml/update-job.asciidoc[] -include::ml/update-snapshot.asciidoc[] +include::update-datafeed.asciidoc[] +include::update-filter.asciidoc[] +include::update-job.asciidoc[] +include::update-snapshot.asciidoc[] //VALIDATE -//include::ml/validate-detector.asciidoc[] -//include::ml/validate-job.asciidoc[] +//include::validate-detector.asciidoc[] +//include::validate-job.asciidoc[] diff --git a/x-pack/docs/en/rest-api/ml/open-job.asciidoc b/docs/reference/ml/apis/open-job.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/open-job.asciidoc rename to docs/reference/ml/apis/open-job.asciidoc index 59d5568ac77..c1e5977b734 100644 --- a/x-pack/docs/en/rest-api/ml/open-job.asciidoc +++ b/docs/reference/ml/apis/open-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] 
[[ml-open-job]] === Open Jobs API ++++ @@ -56,7 +57,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_open } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job opens, you receive the following results: [source,js] @@ -65,5 +66,4 @@ When the job opens, you receive the following results: "opened": true } ---- -//CONSOLE // TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc b/docs/reference/ml/apis/post-calendar-event.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc rename to docs/reference/ml/apis/post-calendar-event.asciidoc index 41af0841d2e..998db409fc7 100644 --- a/x-pack/docs/en/rest-api/ml/post-calendar-event.asciidoc +++ b/docs/reference/ml/apis/post-calendar-event.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-post-calendar-event]] === Add Events to Calendar API ++++ @@ -52,7 +53,7 @@ POST _xpack/ml/calendars/planned-outages/events } -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_addjob] +// TEST[skip:setup:calendar_outages_addjob] The API returns the following results: @@ -81,7 +82,7 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE For more information about these properties, see <>. diff --git a/x-pack/docs/en/rest-api/ml/post-data.asciidoc b/docs/reference/ml/apis/post-data.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/post-data.asciidoc rename to docs/reference/ml/apis/post-data.asciidoc index 40354d7f6f7..6a5a3d3d6cb 100644 --- a/x-pack/docs/en/rest-api/ml/post-data.asciidoc +++ b/docs/reference/ml/apis/post-data.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-post-data]] === Post Data to Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc b/docs/reference/ml/apis/preview-datafeed.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc rename to docs/reference/ml/apis/preview-datafeed.asciidoc index 637b506cb9a..7b9eccd9a59 100644 --- a/x-pack/docs/en/rest-api/ml/preview-datafeed.asciidoc +++ b/docs/reference/ml/apis/preview-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-preview-datafeed]] === Preview {dfeeds-cap} API ++++ @@ -53,7 +54,7 @@ The following example obtains a preview of the `datafeed-farequote` {dfeed}: GET _xpack/ml/datafeeds/datafeed-farequote/_preview -------------------------------------------------- // CONSOLE -// TEST[setup:farequote_datafeed] +// TEST[skip:setup:farequote_datafeed] The data that is returned for this example is as follows: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc b/docs/reference/ml/apis/put-calendar-job.asciidoc similarity index 93% rename from x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc rename to docs/reference/ml/apis/put-calendar-job.asciidoc index 6940957b159..0563047043a 100644 --- a/x-pack/docs/en/rest-api/ml/put-calendar-job.asciidoc +++ b/docs/reference/ml/apis/put-calendar-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-calendar-job]] === Add Jobs to Calendar API ++++ @@ -38,7 +39,7 @@ The following example associates the `planned-outages` calendar with the PUT _xpack/ml/calendars/planned-outages/jobs/total-requests -------------------------------------------------- // CONSOLE -// TEST[setup:calendar_outages_openjob] +// 
TEST[skip:setup:calendar_outages_openjob] The API returns the following results: @@ -51,4 +52,4 @@ The API returns the following results: ] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc b/docs/reference/ml/apis/put-calendar.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/put-calendar.asciidoc rename to docs/reference/ml/apis/put-calendar.asciidoc index a82da5a2c0c..06b8e55d774 100644 --- a/x-pack/docs/en/rest-api/ml/put-calendar.asciidoc +++ b/docs/reference/ml/apis/put-calendar.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-calendar]] === Create Calendar API ++++ @@ -44,6 +45,7 @@ The following example creates the `planned-outages` calendar: PUT _xpack/ml/calendars/planned-outages -------------------------------------------------- // CONSOLE +// TEST[skip:need-license] When the calendar is created, you receive the following results: [source,js] @@ -53,4 +55,4 @@ When the calendar is created, you receive the following results: "job_ids": [] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc rename to docs/reference/ml/apis/put-datafeed.asciidoc index 6b8ad932a1d..b5c99fc8e36 100644 --- a/x-pack/docs/en/rest-api/ml/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-datafeed]] === Create {dfeeds-cap} API ++++ @@ -107,7 +108,7 @@ PUT _xpack/ml/datafeeds/datafeed-total-requests } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the {dfeed} is created, you receive the following results: [source,js] @@ -132,4 +133,4 @@ When the {dfeed} is created, you receive the following results: } ---- // TESTRESPONSE[s/"query_delay": "83474ms"/"query_delay": $body.query_delay/] -// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] +// TESTRESPONSE[s/"query.boost": "1.0"/"query.boost": $body.query.boost/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/put-filter.asciidoc b/docs/reference/ml/apis/put-filter.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/put-filter.asciidoc rename to docs/reference/ml/apis/put-filter.asciidoc index d2982a56f61..165fe969758 100644 --- a/x-pack/docs/en/rest-api/ml/put-filter.asciidoc +++ b/docs/reference/ml/apis/put-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-filter]] === Create Filter API ++++ @@ -55,6 +56,7 @@ PUT _xpack/ml/filters/safe_domains } -------------------------------------------------- // CONSOLE +// TEST[skip:need-licence] When the filter is created, you receive the following response: [source,js] @@ -65,4 +67,4 @@ When the filter is created, you receive the following response: "items": ["*.google.com", "wikipedia.org"] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/put-job.asciidoc rename to docs/reference/ml/apis/put-job.asciidoc index 1c436f53d32..ce053484906 100644 --- a/x-pack/docs/en/rest-api/ml/put-job.asciidoc +++ b/docs/reference/ml/apis/put-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-put-job]] === Create Jobs API ++++ @@ -104,6 +105,7 
@@ PUT _xpack/ml/anomaly_detectors/total-requests } -------------------------------------------------- // CONSOLE +// TEST[skip:need-licence] When the job is created, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc b/docs/reference/ml/apis/resultsresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/resultsresource.asciidoc rename to docs/reference/ml/apis/resultsresource.asciidoc index c28ed72aedb..d3abd094be7 100644 --- a/x-pack/docs/en/rest-api/ml/resultsresource.asciidoc +++ b/docs/reference/ml/apis/resultsresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-results-resource]] === Results Resources diff --git a/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc b/docs/reference/ml/apis/revert-snapshot.asciidoc similarity index 67% rename from x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc rename to docs/reference/ml/apis/revert-snapshot.asciidoc index 1dc3046ac4f..48fc65edf90 100644 --- a/x-pack/docs/en/rest-api/ml/revert-snapshot.asciidoc +++ b/docs/reference/ml/apis/revert-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-revert-snapshot]] === Revert Model Snapshots API ++++ @@ -22,33 +23,6 @@ then it might be appropriate to reset the model state to a time before this event. For example, you might consider reverting to a saved snapshot after Black Friday or a critical system failure. -//// -To revert to a saved snapshot, you must follow this sequence: -. Close the job -. Revert to a snapshot -. Open the job -. Send new data to the job - -When reverting to a snapshot, there is a choice to make about whether or not -you want to keep the results that were created between the time of the snapshot -and the current time. In the case of Black Friday for instance, you might want -to keep the results and carry on processing data from the current time, -though without the models learning the one-off behavior and compensating for it. -However, say in the event of a critical system failure and you decide to reset -and models to a previous known good state and process data from that time, -it makes sense to delete the intervening results for the known bad period and -resend data from that earlier time. - -Any gaps in data since the snapshot time will be treated as nulls and not modeled. -If there is a partial bucket at the end of the snapshot and/or at the beginning -of the new input data, then this will be ignored and treated as a gap. - -For jobs with many entities, the model state may be very large. -If a model state is several GB, this could take 10-20 mins to revert depending -upon machine spec and resources. If this is the case, please ensure this time -is planned for. -Model size (in bytes) is available as part of the Job Resource Model Size Stats. -//// IMPORTANT: Before you revert to a saved snapshot, you must close the job. @@ -77,7 +51,6 @@ If you want to resend data, then delete the intervening results. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. 
==== Examples diff --git a/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc b/docs/reference/ml/apis/snapshotresource.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc rename to docs/reference/ml/apis/snapshotresource.asciidoc index fb2e3d83de6..f068f6d94ed 100644 --- a/x-pack/docs/en/rest-api/ml/snapshotresource.asciidoc +++ b/docs/reference/ml/apis/snapshotresource.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-snapshot-resource]] === Model Snapshot Resources diff --git a/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc b/docs/reference/ml/apis/start-datafeed.asciidoc similarity index 97% rename from x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc rename to docs/reference/ml/apis/start-datafeed.asciidoc index fa3ea35a751..566e700dd04 100644 --- a/x-pack/docs/en/rest-api/ml/start-datafeed.asciidoc +++ b/docs/reference/ml/apis/start-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-start-datafeed]] === Start {dfeeds-cap} API ++++ @@ -79,7 +80,6 @@ of the latest processed record. You must have `manage_ml`, or `manage` cluster privileges to use this API. For more information, see {xpack-ref}/security-privileges.html[Security Privileges]. -//<>. ==== Security Integration @@ -101,7 +101,7 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_start } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] When the {dfeed} starts, you receive the following results: [source,js] @@ -110,5 +110,4 @@ When the {dfeed} starts, you receive the following results: "started": true } ---- -// CONSOLE -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc b/docs/reference/ml/apis/stop-datafeed.asciidoc similarity index 92% rename from x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc rename to docs/reference/ml/apis/stop-datafeed.asciidoc index 27872ff5a20..7ea48974f2d 100644 --- a/x-pack/docs/en/rest-api/ml/stop-datafeed.asciidoc +++ b/docs/reference/ml/apis/stop-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-stop-datafeed]] === Stop {dfeeds-cap} API ++++ @@ -18,7 +19,6 @@ A {dfeed} can be started and stopped multiple times throughout its lifecycle. `POST _xpack/ml/datafeeds/_all/_stop` -//TBD: Can there be spaces between the items in the list? 
===== Description @@ -63,14 +63,14 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_stop } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] When the {dfeed} stops, you receive the following results: + [source,js] ---- { "stopped": true } ---- -// CONSOLE -// TESTRESPONSE +// TESTRESPONSE \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc rename to docs/reference/ml/apis/update-datafeed.asciidoc index bc9462347c1..be55d864c87 100644 --- a/x-pack/docs/en/rest-api/ml/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-datafeed]] === Update {dfeeds-cap} API ++++ @@ -106,7 +107,7 @@ POST _xpack/ml/datafeeds/datafeed-total-requests/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_datafeed] +// TEST[skip:setup:server_metrics_datafeed] When the {dfeed} is updated, you receive the full {dfeed} configuration with the updated values: diff --git a/x-pack/docs/en/rest-api/ml/update-filter.asciidoc b/docs/reference/ml/apis/update-filter.asciidoc similarity index 94% rename from x-pack/docs/en/rest-api/ml/update-filter.asciidoc rename to docs/reference/ml/apis/update-filter.asciidoc index 1b6760dfed6..f551c8e923b 100644 --- a/x-pack/docs/en/rest-api/ml/update-filter.asciidoc +++ b/docs/reference/ml/apis/update-filter.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-filter]] === Update Filter API ++++ @@ -52,7 +53,7 @@ POST _xpack/ml/filters/safe_domains/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] The API returns the following results: @@ -64,4 +65,4 @@ The API returns the following results: "items": ["*.google.com", "*.myorg.com"] } ---- -//TESTRESPONSE +// TESTRESPONSE diff --git a/x-pack/docs/en/rest-api/ml/update-job.asciidoc b/docs/reference/ml/apis/update-job.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-job.asciidoc rename to docs/reference/ml/apis/update-job.asciidoc index 852745e9dd9..58bfb2679d9 100644 --- a/x-pack/docs/en/rest-api/ml/update-job.asciidoc +++ b/docs/reference/ml/apis/update-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-job]] === Update Jobs API ++++ @@ -121,7 +122,7 @@ POST _xpack/ml/anomaly_detectors/total-requests/_update } -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_job] +// TEST[skip:setup:server_metrics_job] When the job is updated, you receive a summary of the job configuration information, including the updated property values. For example: @@ -177,4 +178,4 @@ information, including the updated property values.
For example: } ---- // TESTRESPONSE[s/"job_version": "7.0.0-alpha1"/"job_version": $body.job_version/] -// TESTRESPONSE[s/"create_time": 1518808660505/"create_time": $body.create_time/] +// TESTRESPONSE[s/"create_time": 1518808660505/"create_time": $body.create_time/] \ No newline at end of file diff --git a/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc b/docs/reference/ml/apis/update-snapshot.asciidoc similarity index 98% rename from x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc rename to docs/reference/ml/apis/update-snapshot.asciidoc index 8c98a7b7321..b58eebe810f 100644 --- a/x-pack/docs/en/rest-api/ml/update-snapshot.asciidoc +++ b/docs/reference/ml/apis/update-snapshot.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-update-snapshot]] === Update Model Snapshots API ++++ diff --git a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc b/docs/reference/ml/apis/validate-detector.asciidoc similarity index 95% rename from x-pack/docs/en/rest-api/ml/validate-detector.asciidoc rename to docs/reference/ml/apis/validate-detector.asciidoc index ab8a0de442c..e525b1a1b20 100644 --- a/x-pack/docs/en/rest-api/ml/validate-detector.asciidoc +++ b/docs/reference/ml/apis/validate-detector.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-valid-detector]] === Validate Detectors API ++++ @@ -44,6 +45,7 @@ POST _xpack/ml/anomaly_detectors/_validate/detector } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] When the validation completes, you receive the following results: [source,js] diff --git a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc b/docs/reference/ml/apis/validate-job.asciidoc similarity index 96% rename from x-pack/docs/en/rest-api/ml/validate-job.asciidoc rename to docs/reference/ml/apis/validate-job.asciidoc index 0ccc5bc04e1..b8326058260 100644 --- a/x-pack/docs/en/rest-api/ml/validate-job.asciidoc +++ b/docs/reference/ml/apis/validate-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="platinum"] [[ml-valid-job]] === Validate Jobs API ++++ @@ -55,6 +56,7 @@ POST _xpack/ml/anomaly_detectors/_validate } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] When the validation is complete, you receive the following results: [source,js] diff --git a/x-pack/docs/en/ml/categories.asciidoc b/docs/reference/ml/categories.asciidoc similarity index 99% rename from x-pack/docs/en/ml/categories.asciidoc rename to docs/reference/ml/categories.asciidoc index 21f71b871cb..03ebc8af76e 100644 --- a/x-pack/docs/en/ml/categories.asciidoc +++ b/docs/reference/ml/categories.asciidoc @@ -44,6 +44,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> The `categorization_field_name` property indicates which field will be categorized. <2> The resulting categories are used in a detector by setting `by_field_name`, @@ -127,6 +128,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs2 } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> The {ref}/analysis-pattern-replace-charfilter.html[`pattern_replace` character filter] here achieves exactly the same as the `categorization_filters` in the first @@ -193,6 +195,7 @@ PUT _xpack/ml/anomaly_detectors/it_ops_new_logs3 } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> Tokens basically consist of hyphens, digits, letters, underscores and dots. 
<2> By default, categorization ignores tokens that begin with a digit. <3> By default, categorization also ignores tokens that are hexadecimal numbers. diff --git a/x-pack/docs/en/ml/configuring.asciidoc b/docs/reference/ml/configuring.asciidoc similarity index 88% rename from x-pack/docs/en/ml/configuring.asciidoc rename to docs/reference/ml/configuring.asciidoc index e35f046a82b..9b6149d662a 100644 --- a/x-pack/docs/en/ml/configuring.asciidoc +++ b/docs/reference/ml/configuring.asciidoc @@ -36,20 +36,20 @@ The scenarios in this section describe some best practices for generating useful * <> * <> -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/customurl.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/customurl.asciidoc include::customurl.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/aggregations.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/aggregations.asciidoc include::aggregations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/categories.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/categories.asciidoc include::categories.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/populations.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/populations.asciidoc include::populations.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/transforms.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/transforms.asciidoc include::transforms.asciidoc[] -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/detector-custom-rules.asciidoc +:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/docs/reference/ml/detector-custom-rules.asciidoc include::detector-custom-rules.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/ml/customurl.asciidoc b/docs/reference/ml/customurl.asciidoc similarity index 99% rename from x-pack/docs/en/ml/customurl.asciidoc rename to docs/reference/ml/customurl.asciidoc index 7c197084c0e..95f4f5f938f 100644 --- a/x-pack/docs/en/ml/customurl.asciidoc +++ b/docs/reference/ml/customurl.asciidoc @@ -106,7 +106,7 @@ POST _xpack/ml/anomaly_detectors/sample_job/_update } ---------------------------------- //CONSOLE -//TEST[setup:sample_job] +//TEST[skip:setup:sample_job] When you click this custom URL in the anomalies table in {kib}, it opens up the *Discover* page and displays source data for the period one hour before and diff --git a/x-pack/docs/en/ml/detector-custom-rules.asciidoc b/docs/reference/ml/detector-custom-rules.asciidoc similarity index 97% rename from x-pack/docs/en/ml/detector-custom-rules.asciidoc rename to docs/reference/ml/detector-custom-rules.asciidoc index 8513c7e4d25..02881f4cc43 100644 --- a/x-pack/docs/en/ml/detector-custom-rules.asciidoc +++ b/docs/reference/ml/detector-custom-rules.asciidoc @@ -39,6 +39,7 @@ PUT _xpack/ml/filters/safe_domains } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] Now, we can create our job specifying a scope that uses the `safe_domains` filter for the `highest_registered_domain` field: @@ -70,6 +71,7 @@ PUT _xpack/ml/anomaly_detectors/dns_exfiltration_with_rule } ---------------------------------- // 
CONSOLE +// TEST[skip:needs-licence] As time advances and we see more data and more results, we might encounter new domains that we want to add in the filter. We can do that by using the @@ -83,7 +85,7 @@ POST _xpack/ml/filters/safe_domains/_update } ---------------------------------- // CONSOLE -// TEST[setup:ml_filter_safe_domains] +// TEST[skip:setup:ml_filter_safe_domains] Note that we can use any of the `partition_field_name`, `over_field_name`, or `by_field_name` fields in the `scope`. @@ -123,6 +125,7 @@ PUT _xpack/ml/anomaly_detectors/scoping_multiple_fields } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] Such a detector will skip results when the values of all 3 scoped fields are included in the referenced filters. @@ -166,6 +169,7 @@ PUT _xpack/ml/anomaly_detectors/cpu_with_rule } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] When there are multiple conditions they are combined with a logical `and`. This is useful when we want the rule to apply to a range. We simply create @@ -205,6 +209,7 @@ PUT _xpack/ml/anomaly_detectors/rule_with_range } ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] ==== Custom rules in the life-cycle of a job diff --git a/x-pack/docs/en/ml/functions.asciidoc b/docs/reference/ml/functions.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions.asciidoc rename to docs/reference/ml/functions.asciidoc diff --git a/x-pack/docs/en/ml/functions/count.asciidoc b/docs/reference/ml/functions/count.asciidoc similarity index 97% rename from x-pack/docs/en/ml/functions/count.asciidoc rename to docs/reference/ml/functions/count.asciidoc index a2dc5645b61..abbbd118ffe 100644 --- a/x-pack/docs/en/ml/functions/count.asciidoc +++ b/docs/reference/ml/functions/count.asciidoc @@ -59,6 +59,7 @@ PUT _xpack/ml/anomaly_detectors/example1 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This example is probably the simplest possible analysis. It identifies time buckets during which the overall count of events is higher or lower than @@ -86,6 +87,7 @@ PUT _xpack/ml/anomaly_detectors/example2 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `high_count` function in a detector in your job, it models the event rate for each error code. It detects users that generate an @@ -110,6 +112,7 @@ PUT _xpack/ml/anomaly_detectors/example3 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] In this example, the function detects when the count of events for a status code is lower than usual. @@ -136,6 +139,7 @@ PUT _xpack/ml/anomaly_detectors/example4 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you are analyzing an aggregated `events_per_min` field, do not use a sum function (for example, `sum(events_per_min)`). Instead, use the count function @@ -200,6 +204,7 @@ PUT _xpack/ml/anomaly_detectors/example5 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `high_non_zero_count` function in a detector in your job, it models the count of events for the `signaturename` field. It ignores any buckets @@ -253,6 +258,7 @@ PUT _xpack/ml/anomaly_detectors/example6 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This `distinct_count` function detects when a system has an unusual number of logged in users. 
When you use this function in a detector in your job, it @@ -278,6 +284,7 @@ PUT _xpack/ml/anomaly_detectors/example7 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] This example detects instances of port scanning. When you use this function in a detector in your job, it models the distinct count of ports. It also detects the diff --git a/x-pack/docs/en/ml/functions/geo.asciidoc b/docs/reference/ml/functions/geo.asciidoc similarity index 98% rename from x-pack/docs/en/ml/functions/geo.asciidoc rename to docs/reference/ml/functions/geo.asciidoc index 5bcf6c33945..461ab825ff5 100644 --- a/x-pack/docs/en/ml/functions/geo.asciidoc +++ b/docs/reference/ml/functions/geo.asciidoc @@ -47,6 +47,7 @@ PUT _xpack/ml/anomaly_detectors/example1 } -------------------------------------------------- // CONSOLE +// TEST[skip:needs-licence] If you use this `lat_long` function in a detector in your job, it detects anomalies where the geographic location of a credit card transaction is @@ -98,6 +99,6 @@ PUT _xpack/ml/datafeeds/datafeed-test2 } -------------------------------------------------- // CONSOLE -// TEST[setup:farequote_job] +// TEST[skip:setup:farequote_job] For more information, see <>. diff --git a/x-pack/docs/en/ml/functions/info.asciidoc b/docs/reference/ml/functions/info.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/info.asciidoc rename to docs/reference/ml/functions/info.asciidoc diff --git a/x-pack/docs/en/ml/functions/metric.asciidoc b/docs/reference/ml/functions/metric.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/metric.asciidoc rename to docs/reference/ml/functions/metric.asciidoc diff --git a/x-pack/docs/en/ml/functions/rare.asciidoc b/docs/reference/ml/functions/rare.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/rare.asciidoc rename to docs/reference/ml/functions/rare.asciidoc diff --git a/x-pack/docs/en/ml/functions/sum.asciidoc b/docs/reference/ml/functions/sum.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/sum.asciidoc rename to docs/reference/ml/functions/sum.asciidoc diff --git a/x-pack/docs/en/ml/functions/time.asciidoc b/docs/reference/ml/functions/time.asciidoc similarity index 100% rename from x-pack/docs/en/ml/functions/time.asciidoc rename to docs/reference/ml/functions/time.asciidoc diff --git a/x-pack/docs/en/ml/images/ml-category-advanced.jpg b/docs/reference/ml/images/ml-category-advanced.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-category-advanced.jpg rename to docs/reference/ml/images/ml-category-advanced.jpg diff --git a/x-pack/docs/en/ml/images/ml-category-anomalies.jpg b/docs/reference/ml/images/ml-category-anomalies.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-category-anomalies.jpg rename to docs/reference/ml/images/ml-category-anomalies.jpg diff --git a/x-pack/docs/en/ml/images/ml-categoryterms.jpg b/docs/reference/ml/images/ml-categoryterms.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-categoryterms.jpg rename to docs/reference/ml/images/ml-categoryterms.jpg diff --git a/x-pack/docs/en/ml/images/ml-create-job.jpg b/docs/reference/ml/images/ml-create-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-create-job.jpg rename to docs/reference/ml/images/ml-create-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-create-jobs.jpg b/docs/reference/ml/images/ml-create-jobs.jpg similarity index 100% rename from 
x-pack/docs/en/ml/images/ml-create-jobs.jpg rename to docs/reference/ml/images/ml-create-jobs.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-detail.jpg b/docs/reference/ml/images/ml-customurl-detail.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-detail.jpg rename to docs/reference/ml/images/ml-customurl-detail.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-discover.jpg b/docs/reference/ml/images/ml-customurl-discover.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-discover.jpg rename to docs/reference/ml/images/ml-customurl-discover.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl-edit.jpg b/docs/reference/ml/images/ml-customurl-edit.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl-edit.jpg rename to docs/reference/ml/images/ml-customurl-edit.jpg diff --git a/x-pack/docs/en/ml/images/ml-customurl.jpg b/docs/reference/ml/images/ml-customurl.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-customurl.jpg rename to docs/reference/ml/images/ml-customurl.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-dates.jpg b/docs/reference/ml/images/ml-data-dates.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-dates.jpg rename to docs/reference/ml/images/ml-data-dates.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-keywords.jpg b/docs/reference/ml/images/ml-data-keywords.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-keywords.jpg rename to docs/reference/ml/images/ml-data-keywords.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-metrics.jpg b/docs/reference/ml/images/ml-data-metrics.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-metrics.jpg rename to docs/reference/ml/images/ml-data-metrics.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-topmetrics.jpg b/docs/reference/ml/images/ml-data-topmetrics.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-topmetrics.jpg rename to docs/reference/ml/images/ml-data-topmetrics.jpg diff --git a/x-pack/docs/en/ml/images/ml-data-visualizer.jpg b/docs/reference/ml/images/ml-data-visualizer.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-data-visualizer.jpg rename to docs/reference/ml/images/ml-data-visualizer.jpg diff --git a/x-pack/docs/en/ml/images/ml-edit-job.jpg b/docs/reference/ml/images/ml-edit-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-edit-job.jpg rename to docs/reference/ml/images/ml-edit-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-anomaly.jpg b/docs/reference/ml/images/ml-population-anomaly.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-anomaly.jpg rename to docs/reference/ml/images/ml-population-anomaly.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-job.jpg b/docs/reference/ml/images/ml-population-job.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-job.jpg rename to docs/reference/ml/images/ml-population-job.jpg diff --git a/x-pack/docs/en/ml/images/ml-population-results.jpg b/docs/reference/ml/images/ml-population-results.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-population-results.jpg rename to docs/reference/ml/images/ml-population-results.jpg diff --git a/x-pack/docs/en/ml/images/ml-scriptfields.jpg b/docs/reference/ml/images/ml-scriptfields.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-scriptfields.jpg rename to docs/reference/ml/images/ml-scriptfields.jpg diff --git 
a/x-pack/docs/en/ml/images/ml-start-feed.jpg b/docs/reference/ml/images/ml-start-feed.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-start-feed.jpg rename to docs/reference/ml/images/ml-start-feed.jpg diff --git a/x-pack/docs/en/ml/images/ml-stop-feed.jpg b/docs/reference/ml/images/ml-stop-feed.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml-stop-feed.jpg rename to docs/reference/ml/images/ml-stop-feed.jpg diff --git a/x-pack/docs/en/ml/images/ml.jpg b/docs/reference/ml/images/ml.jpg similarity index 100% rename from x-pack/docs/en/ml/images/ml.jpg rename to docs/reference/ml/images/ml.jpg diff --git a/x-pack/docs/en/ml/populations.asciidoc b/docs/reference/ml/populations.asciidoc similarity index 94% rename from x-pack/docs/en/ml/populations.asciidoc rename to docs/reference/ml/populations.asciidoc index bf0dd2ad7d7..ed58c117f17 100644 --- a/x-pack/docs/en/ml/populations.asciidoc +++ b/docs/reference/ml/populations.asciidoc @@ -51,14 +51,11 @@ PUT _xpack/ml/anomaly_detectors/population } ---------------------------------- //CONSOLE +// TEST[skip:needs-licence] <1> This `over_field_name` property indicates that the metrics for each user (as identified by their `username` value) are analyzed relative to other users in each bucket. -//TO-DO: Per sophiec20 "Perhaps add the datafeed config and add a query filter to -//include only workstations as servers and printers would behave differently -//from the population - If your data is stored in {es}, you can use the population job wizard in {kib} to create a job with these same properties. For example, the population job wizard provides the following job settings: diff --git a/x-pack/docs/en/ml/stopping-ml.asciidoc b/docs/reference/ml/stopping-ml.asciidoc similarity index 94% rename from x-pack/docs/en/ml/stopping-ml.asciidoc rename to docs/reference/ml/stopping-ml.asciidoc index c0be2d947cd..17505a02d15 100644 --- a/x-pack/docs/en/ml/stopping-ml.asciidoc +++ b/docs/reference/ml/stopping-ml.asciidoc @@ -28,7 +28,7 @@ request stops the `feed1` {dfeed}: POST _xpack/ml/datafeeds/datafeed-total-requests/_stop -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_startdf] +// TEST[skip:setup:server_metrics_startdf] NOTE: You must have `manage_ml`, or `manage` cluster privileges to stop {dfeeds}. For more information, see <<security-privileges>>. @@ -49,6 +49,7 @@ If you are upgrading your cluster, you can use the following request to stop all POST _xpack/ml/datafeeds/_all/_stop ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] [float] [[closing-ml-jobs]] @@ -67,7 +68,7 @@ example, the following request closes the `job1` job: POST _xpack/ml/anomaly_detectors/total-requests/_close -------------------------------------------------- // CONSOLE -// TEST[setup:server_metrics_openjob] +// TEST[skip:setup:server_metrics_openjob] NOTE: You must have `manage_ml`, or `manage` cluster privileges to close jobs. For more information, see <<security-privileges>>.
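Taken together, the two halves of this page form a two-step quiesce that is useful before an upgrade: stop every {dfeed}, then close every job (a job cannot be closed normally while its {dfeed} is still running). A minimal sketch of that sequence with the low-level REST client, assuming a local cluster and using the `_all` endpoints shown on this page:

[source,java]
----------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class QuiesceMlSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // Stop every datafeed first; a running datafeed blocks a normal job close.
            client.performRequest(new Request("POST", "/_xpack/ml/datafeeds/_all/_stop"));
            // Then close every job so model state is flushed and persisted.
            client.performRequest(new Request("POST", "/_xpack/ml/anomaly_detectors/_all/_close"));
        }
    }
}
----------------------------------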
@@ -86,3 +87,4 @@ all open jobs on the cluster: POST _xpack/ml/anomaly_detectors/_all/_close ---------------------------------- // CONSOLE +// TEST[skip:needs-licence] diff --git a/x-pack/docs/en/ml/transforms.asciidoc b/docs/reference/ml/transforms.asciidoc similarity index 97% rename from x-pack/docs/en/ml/transforms.asciidoc rename to docs/reference/ml/transforms.asciidoc index c4b4d560297..a2276895fc9 100644 --- a/x-pack/docs/en/ml/transforms.asciidoc +++ b/docs/reference/ml/transforms.asciidoc @@ -95,7 +95,7 @@ PUT /my_index/my_type/1 } ---------------------------------- // CONSOLE -// TESTSETUP +// TEST[skip:SETUP] <1> In this example, string fields are mapped as `keyword` fields to support aggregation. If you want both a full text (`text`) and a keyword (`keyword`) version of the same field, use multi-fields. For more information, see @@ -144,7 +144,7 @@ PUT _xpack/ml/datafeeds/datafeed-test1 } ---------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] <1> A script field named `total_error_count` is referenced in the detector within the job. <2> The script field is defined in the {dfeed}. @@ -163,7 +163,7 @@ You can preview the contents of the {dfeed} by using the following API: GET _xpack/ml/datafeeds/datafeed-test1/_preview ---------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] In this example, the API returns the following results, which contain a sum of the `error_count` and `aborted_count` values: @@ -177,8 +177,6 @@ the `error_count` and `aborted_count` values: } ] ---------------------------------- -// TESTRESPONSE - NOTE: This example demonstrates how to use script fields, but it contains insufficient data to generate meaningful results. For a full demonstration of @@ -254,7 +252,7 @@ PUT _xpack/ml/datafeeds/datafeed-test2 GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] <1> The script field has a rather generic name in this case, since it will be used for various tests in the subsequent examples. <2> The script field uses the plus (+) operator to concatenate strings. @@ -271,7 +269,6 @@ and "SMITH " have been concatenated and an underscore was added: } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform3]] .Example 3: Trimming strings @@ -292,7 +289,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses the `trim()` function to trim extra white space from a string. @@ -308,7 +305,6 @@ has been trimmed to "SMITH": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform4]] .Example 4: Converting strings to lowercase @@ -329,7 +325,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses the `toLowerCase` function to convert a string to all lowercase letters. Likewise, you can use the `toUpperCase{}` function to convert a string to uppercase letters. 
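The same update-and-preview round trip works from Java. A sketch with the low-level REST client that swaps the script over to `toUpperCase()` and previews the result, assuming the `datafeed-test2` {dfeed} from the earlier examples exists and is stopped; the script-field and document-field names here are illustrative, not the exact ones used by the docs tests:

[source,java]
----------------------------------
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class UppercaseScriptFieldSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // Replace the datafeed's script field with an uppercasing variant.
            Request update = new Request("POST", "/_xpack/ml/datafeeds/datafeed-test2/_update");
            update.setJsonEntity(
                "{ \"script_fields\": { \"my_script_field\": {"
                + "   \"script\": { \"lang\": \"painless\","
                + "     \"source\": \"doc['first_name'].value.toUpperCase()\" } } } }");
            client.performRequest(update);

            // Preview the datafeed to inspect the transformed values.
            Response preview = client.performRequest(
                new Request("GET", "/_xpack/ml/datafeeds/datafeed-test2/_preview"));
            System.out.println(EntityUtils.toString(preview.getEntity()));
        }
    }
}
----------------------------------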
@@ -346,7 +342,6 @@ has been converted to "joe": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform5]] .Example 5: Converting strings to mixed case formats @@ -367,7 +362,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field is a more complicated example of case manipulation. It uses the `subString()` function to capitalize the first letter of a string and converts the remaining characters to lowercase. @@ -384,7 +379,6 @@ has been converted to "Joe": } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform6]] .Example 6: Replacing tokens @@ -405,7 +399,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field uses regular expressions to replace white space with underscores. @@ -421,7 +415,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform7]] .Example 7: Regular expression matching and concatenation @@ -442,7 +435,7 @@ POST _xpack/ml/datafeeds/datafeed-test2/_update GET _xpack/ml/datafeeds/datafeed-test2/_preview -------------------------------------------------- // CONSOLE -// TEST[continued] +// TEST[skip:continued] <1> This script field looks for a specific regular expression pattern and emits the matched groups as a concatenated string. If no match is found, it emits an empty string. @@ -459,7 +452,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform8]] .Example 8: Splitting strings by domain name @@ -509,7 +501,7 @@ PUT _xpack/ml/datafeeds/datafeed-test3 GET _xpack/ml/datafeeds/datafeed-test3/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] If you have a single field that contains a well-formed DNS domain name, you can use the `domainSplit()` function to split the string into its highest registered @@ -537,7 +529,6 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE [[ml-configuring-transform9]] .Example 9: Transforming geo_point data @@ -583,7 +574,7 @@ PUT _xpack/ml/datafeeds/datafeed-test4 GET _xpack/ml/datafeeds/datafeed-test4/_preview -------------------------------------------------- // CONSOLE -// TEST[skip:broken] +// TEST[skip:needs-licence] In {es}, location data can be stored in `geo_point` fields but this data type is not supported natively in {xpackml} analytics. This example of a script field @@ -602,4 +593,4 @@ The preview {dfeed} API returns the following results, which show that } ] ---------------------------------- -// TESTRESPONSE + diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index b88c7bf4547..1a932fdd414 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -544,3 +544,14 @@ You can use the following APIs to add, remove, and retrieve role mappings: === Privilege APIs See <>. + +[role="exclude",id="xpack-commands"] +=== X-Pack commands + +See <>. 
+ +[role="exclude",id="ml-api-definitions"] +=== Machine learning API definitions + +See <<api-definitions>>. + diff --git a/docs/reference/rest-api/defs.asciidoc b/docs/reference/rest-api/defs.asciidoc new file mode 100644 index 00000000000..4eeedc55399 --- /dev/null +++ b/docs/reference/rest-api/defs.asciidoc @@ -0,0 +1,27 @@ +[role="xpack"] +[[api-definitions]] +== Definitions + +These resource definitions are used in {ml} and {security} APIs and in {kib} +advanced {ml} job configuration options. + +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> + +include::{es-repo-dir}/ml/apis/calendarresource.asciidoc[] +include::{es-repo-dir}/ml/apis/datafeedresource.asciidoc[] +include::{es-repo-dir}/ml/apis/filterresource.asciidoc[] +include::{es-repo-dir}/ml/apis/jobresource.asciidoc[] +include::{es-repo-dir}/ml/apis/jobcounts.asciidoc[] +include::{es-repo-dir}/ml/apis/snapshotresource.asciidoc[] +include::{xes-repo-dir}/rest-api/security/role-mapping-resources.asciidoc[] +include::{es-repo-dir}/ml/apis/resultsresource.asciidoc[] +include::{es-repo-dir}/ml/apis/eventresource.asciidoc[] diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 9ec57940dd2..b80e8badf5b 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -22,8 +22,8 @@ include::info.asciidoc[] include::{xes-repo-dir}/rest-api/graph/explore.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] -include::{xes-repo-dir}/rest-api/ml-api.asciidoc[] -include::{xes-repo-dir}/rest-api/rollup-api.asciidoc[] +include::{es-repo-dir}/ml/apis/ml-api.asciidoc[] +include::{es-repo-dir}/rollup/rollup-api.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] -include::{xes-repo-dir}/rest-api/defs.asciidoc[] +include::defs.asciidoc[] diff --git a/x-pack/docs/en/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc similarity index 97% rename from x-pack/docs/en/rollup/api-quickref.asciidoc rename to docs/reference/rollup/api-quickref.asciidoc index 5e99f1c6984..1d372a03ddc 100644 --- a/x-pack/docs/en/rollup/api-quickref.asciidoc +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-api-quickref]] == API Quick Reference diff --git a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc b/docs/reference/rollup/apis/delete-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/delete-job.asciidoc rename to docs/reference/rollup/apis/delete-job.asciidoc index b795e0b28c7..37774560848 100644 --- a/x-pack/docs/en/rest-api/rollup/delete-job.asciidoc +++ b/docs/reference/rollup/apis/delete-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-delete-job]] === Delete Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc b/docs/reference/rollup/apis/get-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/get-job.asciidoc rename to docs/reference/rollup/apis/get-job.asciidoc index 96053dbfea6..794d7248012 100644 --- a/x-pack/docs/en/rest-api/rollup/get-job.asciidoc +++ b/docs/reference/rollup/apis/get-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-get-job]] === Get Rollup Jobs API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc b/docs/reference/rollup/apis/put-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/put-job.asciidoc rename
to docs/reference/rollup/apis/put-job.asciidoc index 27889d985b8..79e30ae8dc9 100644 --- a/x-pack/docs/en/rest-api/rollup/put-job.asciidoc +++ b/docs/reference/rollup/apis/put-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-put-job]] === Create Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc b/docs/reference/rollup/apis/rollup-caps.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc rename to docs/reference/rollup/apis/rollup-caps.asciidoc index 1f233f195a0..907efb94c17 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-caps.asciidoc +++ b/docs/reference/rollup/apis/rollup-caps.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-get-rollup-caps]] === Get Rollup Job Capabilities ++++ diff --git a/x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc b/docs/reference/rollup/apis/rollup-index-caps.asciidoc similarity index 100% rename from x-pack/docs/en/rest-api/rollup/rollup-index-caps.asciidoc rename to docs/reference/rollup/apis/rollup-index-caps.asciidoc diff --git a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc rename to docs/reference/rollup/apis/rollup-job-config.asciidoc index f937f28601a..3a917fb59f2 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-job-config]] === Rollup Job Configuration diff --git a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc rename to docs/reference/rollup/apis/rollup-search.asciidoc index 115ef8fb043..8e7fc69a00a 100644 --- a/x-pack/docs/en/rest-api/rollup/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-search]] === Rollup Search ++++ diff --git a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc b/docs/reference/rollup/apis/start-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/start-job.asciidoc rename to docs/reference/rollup/apis/start-job.asciidoc index 9a0a0a7e4f0..cf44883895c 100644 --- a/x-pack/docs/en/rest-api/rollup/start-job.asciidoc +++ b/docs/reference/rollup/apis/start-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-start-job]] === Start Job API ++++ diff --git a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc b/docs/reference/rollup/apis/stop-job.asciidoc similarity index 99% rename from x-pack/docs/en/rest-api/rollup/stop-job.asciidoc rename to docs/reference/rollup/apis/stop-job.asciidoc index 60507402705..5912b2d688b 100644 --- a/x-pack/docs/en/rest-api/rollup/stop-job.asciidoc +++ b/docs/reference/rollup/apis/stop-job.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-stop-job]] === Stop Job API ++++ diff --git a/x-pack/docs/en/rollup/index.asciidoc b/docs/reference/rollup/index.asciidoc similarity index 97% rename from x-pack/docs/en/rollup/index.asciidoc rename to docs/reference/rollup/index.asciidoc index 9ac89341bfe..64dc233f82f 100644 --- a/x-pack/docs/en/rollup/index.asciidoc +++ b/docs/reference/rollup/index.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[xpack-rollup]] = Rolling up historical data diff --git 
a/x-pack/docs/en/rollup/overview.asciidoc b/docs/reference/rollup/overview.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/overview.asciidoc rename to docs/reference/rollup/overview.asciidoc index a9a983fbecc..b2570f647e7 100644 --- a/x-pack/docs/en/rollup/overview.asciidoc +++ b/docs/reference/rollup/overview.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-overview]] == Overview diff --git a/x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc b/docs/reference/rollup/rollup-agg-limitations.asciidoc similarity index 94% rename from x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc rename to docs/reference/rollup/rollup-agg-limitations.asciidoc index cd20622d93c..9f8b6f66ade 100644 --- a/x-pack/docs/en/rollup/rollup-agg-limitations.asciidoc +++ b/docs/reference/rollup/rollup-agg-limitations.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-agg-limitations]] == Rollup Aggregation Limitations diff --git a/x-pack/docs/en/rest-api/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc similarity index 61% rename from x-pack/docs/en/rest-api/rollup-api.asciidoc rename to docs/reference/rollup/rollup-api.asciidoc index 9a8ec00d77a..099686fb432 100644 --- a/x-pack/docs/en/rest-api/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[rollup-apis]] == Rollup APIs @@ -26,12 +27,12 @@ -include::rollup/delete-job.asciidoc[] -include::rollup/get-job.asciidoc[] -include::rollup/put-job.asciidoc[] -include::rollup/start-job.asciidoc[] -include::rollup/stop-job.asciidoc[] -include::rollup/rollup-caps.asciidoc[] -include::rollup/rollup-index-caps.asciidoc[] -include::rollup/rollup-search.asciidoc[] -include::rollup/rollup-job-config.asciidoc[] \ No newline at end of file +include::apis/delete-job.asciidoc[] +include::apis/get-job.asciidoc[] +include::apis/put-job.asciidoc[] +include::apis/start-job.asciidoc[] +include::apis/stop-job.asciidoc[] +include::apis/rollup-caps.asciidoc[] +include::apis/rollup-index-caps.asciidoc[] +include::apis/rollup-search.asciidoc[] +include::apis/rollup-job-config.asciidoc[] diff --git a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc b/docs/reference/rollup/rollup-getting-started.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/rollup-getting-started.asciidoc rename to docs/reference/rollup/rollup-getting-started.asciidoc index b6c913d7d34..8f99bc2c010 100644 --- a/x-pack/docs/en/rollup/rollup-getting-started.asciidoc +++ b/docs/reference/rollup/rollup-getting-started.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-getting-started]] == Getting Started diff --git a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/rollup-search-limitations.asciidoc rename to docs/reference/rollup/rollup-search-limitations.asciidoc index 99f19a179ed..43feeab9a2e 100644 --- a/x-pack/docs/en/rollup/rollup-search-limitations.asciidoc +++ b/docs/reference/rollup/rollup-search-limitations.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-search-limitations]] == Rollup Search Limitations diff --git a/x-pack/docs/en/rollup/understanding-groups.asciidoc b/docs/reference/rollup/understanding-groups.asciidoc similarity index 99% rename from x-pack/docs/en/rollup/understanding-groups.asciidoc rename to docs/reference/rollup/understanding-groups.asciidoc index 803555b2d73..6321ab9b00f 100644 
--- a/x-pack/docs/en/rollup/understanding-groups.asciidoc +++ b/docs/reference/rollup/understanding-groups.asciidoc @@ -1,3 +1,5 @@ +[role="xpack"] +[testenv="basic"] [[rollup-understanding-groups]] == Understanding Groups diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index b2444535153..e2df59ad3f4 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -596,7 +596,7 @@ And the response: ] }, { - "name": "BucketCollector: [[my_scoped_agg, my_global_agg]]", + "name": "MultiBucketCollector: [[my_scoped_agg, my_global_agg]]", "reason": "aggregation", "time_in_nanos": 8273 } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java index 87b16bc448e..5ea0d8312ad 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.join.ParentJoinPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -58,6 +59,8 @@ public abstract class ParentChildTestCase extends ESIntegTestCase { @Override public Settings indexSettings() { Settings.Builder builder = Settings.builder().put(super.indexSettings()) + // AwaitsFix: https://github.com/elastic/elasticsearch/issues/33318 + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) // aggressive filter caching so that we can assert on the filter cache size .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), true) .put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java index bf0adc6e142..72a2a0d7335 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestUpdateByQueryAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -63,7 +62,7 @@ public class RestUpdateByQueryAction extends AbstractBulkByQueryRestHandler> consumers = new HashMap<>(); consumers.put("conflicts", o -> internal.setConflicts((String) o)); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java index cc848900b78..30621ab607b 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RoundTripTests.java @@ -81,7 +81,7 @@ public class RoundTripTests extends ESTestCase { } public void testUpdateByQueryRequest() throws IOException { - 
UpdateByQueryRequest update = new UpdateByQueryRequest(new SearchRequest()); + UpdateByQueryRequest update = new UpdateByQueryRequest(); randomRequest(update); if (randomBoolean()) { update.setPipeline(randomAlphaOfLength(5)); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java index b688ce019e3..d3f62af907d 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryMetadataTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex; import org.elasticsearch.index.reindex.ScrollableHitSource.Hit; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; public class UpdateByQueryMetadataTests @@ -39,7 +38,7 @@ public class UpdateByQueryMetadataTests @Override protected UpdateByQueryRequest request() { - return new UpdateByQueryRequest(new SearchRequest()); + return new UpdateByQueryRequest(); } private class TestAction extends TransportUpdateByQueryAction.AsyncIndexBySearchAction { diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java index 4006d16fbcb..8c9744aa0dd 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryWithScriptTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService; @@ -50,7 +49,7 @@ public class UpdateByQueryWithScriptTests @Override protected UpdateByQueryRequest request() { - return new UpdateByQueryRequest(new SearchRequest()); + return new UpdateByQueryRequest(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java index ae029ce3f93..5d425b16d16 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportWriteAction.java @@ -130,6 +130,7 @@ public abstract class TransportWriteAction< implements RespondingWriteResult { boolean finishedAsyncActions; public final Location location; + public final IndexShard primary; ActionListener listener = null; public WritePrimaryResult(ReplicaRequest request, @Nullable Response finalResponse, @@ -137,6 +138,7 @@ public abstract class TransportWriteAction< IndexShard primary, Logger logger) { super(request, finalResponse, operationFailure); this.location = location; + this.primary = primary; assert location == null || operationFailure == null : "expected either failure to be null or translog location to be null, " + "but found: [" + location + "] translog location and [" + operationFailure + "] failure"; diff --git a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java index b2a24faf643..35e55f5f505 100644 --- 
a/server/src/main/java/org/elasticsearch/common/logging/Loggers.java +++ b/server/src/main/java/org/elasticsearch/common/logging/Loggers.java @@ -49,16 +49,17 @@ public class Loggers { Setting.prefixKeySetting("logger.", (key) -> new Setting<>(key, Level.INFO.name(), Level::valueOf, Setting.Property.Dynamic, Setting.Property.NodeScope)); - public static Logger getLogger(Class clazz, Settings settings, ShardId shardId, String... prefixes) { - return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0])); + public static Logger getLogger(Class clazz, ShardId shardId, String... prefixes) { + return getLogger(clazz, Settings.EMPTY, + shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0])); } /** - * Just like {@link #getLogger(Class, org.elasticsearch.common.settings.Settings, ShardId, String...)} but String loggerName instead of + * Just like {@link #getLogger(Class, ShardId, String...)} but String loggerName instead of * Class. */ - public static Logger getLogger(String loggerName, Settings settings, ShardId shardId, String... prefixes) { - return getLogger(loggerName, settings, + public static Logger getLogger(String loggerName, ShardId shardId, String... prefixes) { + return getLogger(loggerName, Settings.EMPTY, asArrayList(shardId.getIndexName(), Integer.toString(shardId.id()), prefixes).toArray(new String[0])); } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 3ea022bbebd..6a612091b97 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -75,11 +75,10 @@ public final class IndexSettings { switch(s) { case "false": case "true": - case "fix": case "checksum": return s; default: - throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s); + throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, checksum] but was: " + s); } }, Property.IndexScope); @@ -417,7 +416,7 @@ public final class IndexSettings { generationThresholdSize = scopedSettings.get(INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING); mergeSchedulerConfig = new MergeSchedulerConfig(this); gcDeletesInMillis = scopedSettings.get(INDEX_GC_DELETES_SETTING).getMillis(); - softDeleteEnabled = version.onOrAfter(Version.V_7_0_0_alpha1) && scopedSettings.get(INDEX_SOFT_DELETES_SETTING); + softDeleteEnabled = version.onOrAfter(Version.V_6_5_0) && scopedSettings.get(INDEX_SOFT_DELETES_SETTING); softDeleteRetentionOperations = scopedSettings.get(INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); warmerEnabled = scopedSettings.get(INDEX_WARMER_ENABLED_SETTING); maxResultWindow = scopedSettings.get(MAX_RESULT_WINDOW_SETTING); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index f4876149cac..7306b4e8cfd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -71,7 +71,7 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler { this.config = 
indexSettings.getMergeSchedulerConfig(); this.shardId = shardId; this.indexSettings = indexSettings.getSettings(); - this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId); + this.logger = Loggers.getLogger(getClass(), shardId); refreshConfig(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 08724d6e794..0c54fb916f5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -134,7 +134,7 @@ public abstract class Engine implements Closeable { this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name - engineConfig.getIndexSettings().getSettings(), engineConfig.getShardId()); + engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index da4decc93b1..00d1f67f01b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -729,8 +729,7 @@ public class InternalEngine extends Engine { + index.getAutoGeneratedIdTimestamp(); switch (index.origin()) { case PRIMARY: - assert (index.version() == Versions.MATCH_ANY && index.versionType() == VersionType.INTERNAL) - : "version: " + index.version() + " type: " + index.versionType(); + assertPrimaryCanOptimizeAddDocument(index); return true; case PEER_RECOVERY: case REPLICA: @@ -747,9 +746,15 @@ public class InternalEngine extends Engine { return false; } + protected boolean assertPrimaryCanOptimizeAddDocument(final Index index) { + assert (index.version() == Versions.MATCH_ANY && index.versionType() == VersionType.INTERNAL) + : "version: " + index.version() + " type: " + index.versionType(); + return true; + } + private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { if (origin == Operation.Origin.PRIMARY) { - assert assertOriginPrimarySequenceNumber(seqNo); + assertPrimaryIncomingSequenceNumber(origin, seqNo); } else { // sequence number should be set when operation origin is not primary assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin; @@ -757,7 +762,7 @@ public class InternalEngine extends Engine { return true; } - protected boolean assertOriginPrimarySequenceNumber(final long seqNo) { + protected boolean assertPrimaryIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { // sequence number should not be set when operation origin is primary assert seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : "primary operations must never have an assigned sequence number but was [" + seqNo + "]"; @@ -775,7 +780,7 @@ public class InternalEngine extends Engine { * @param operation the operation * @return the sequence number */ - protected long doGenerateSeqNoForOperation(final Operation operation) { + long doGenerateSeqNoForOperation(final Operation operation) { return localCheckpointTracker.generateSeqNo(); } @@ -818,14 +823,7 @@ public class InternalEngine extends Engine { * if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. 
A` will then just be skipped or calls * updateDocument. */ - final IndexingStrategy plan; - - if (index.origin() == Operation.Origin.PRIMARY) { - plan = planIndexingAsPrimary(index); - } else { - // non-primary mode (i.e., replica or recovery) - plan = planIndexingAsNonPrimary(index); - } + final IndexingStrategy plan = indexingStrategyForOperation(index); final IndexResult indexResult; if (plan.earlyResultOnPreFlightError.isPresent()) { @@ -873,7 +871,8 @@ public class InternalEngine extends Engine { } } - private IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException { + protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException { + assertNonPrimaryOrigin(index); final IndexingStrategy plan; final boolean appendOnlyRequest = canOptimizeAddDocument(index); if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) { @@ -919,7 +918,16 @@ public class InternalEngine extends Engine { return plan; } - private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { + protected IndexingStrategy indexingStrategyForOperation(final Index index) throws IOException { + if (index.origin() == Operation.Origin.PRIMARY) { + return planIndexingAsPrimary(index); + } else { + // non-primary mode (i.e., replica or recovery) + return planIndexingAsNonPrimary(index); + } + } + + protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { assert index.origin() == Operation.Origin.PRIMARY : "planing as primary but origin isn't. got " + index.origin(); final IndexingStrategy plan; // resolve an external operation into an internal one which is safe to replay @@ -1157,12 +1165,7 @@ public class InternalEngine extends Engine { try (ReleasableLock ignored = readLock.acquire(); Releasable ignored2 = versionMap.acquireLock(delete.uid().bytes())) { ensureOpen(); lastWriteNanos = delete.startTime(); - final DeletionStrategy plan; - if (delete.origin() == Operation.Origin.PRIMARY) { - plan = planDeletionAsPrimary(delete); - } else { - plan = planDeletionAsNonPrimary(delete); - } + final DeletionStrategy plan = deletionStrategyForOperation(delete); if (plan.earlyResultOnPreflightError.isPresent()) { deleteResult = plan.earlyResultOnPreflightError.get(); @@ -1203,8 +1206,17 @@ public class InternalEngine extends Engine { return deleteResult; } - private DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException { - assert delete.origin() != Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin(); + protected DeletionStrategy deletionStrategyForOperation(final Delete delete) throws IOException { + if (delete.origin() == Operation.Origin.PRIMARY) { + return planDeletionAsPrimary(delete); + } else { + // non-primary mode (i.e., replica or recovery) + return planDeletionAsNonPrimary(delete); + } + } + + protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException { + assertNonPrimaryOrigin(delete); maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr)); assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]"; @@ -1233,7 +1245,12 @@ public class InternalEngine extends Engine { return plan; } - private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException { + protected 
boolean assertNonPrimaryOrigin(final Operation operation) { + assert operation.origin() != Operation.Origin.PRIMARY : "planing as primary but got " + operation.origin(); + return true; + } + + protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException { assert delete.origin() == Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin(); // resolve operation from external to internal final VersionValue versionValue = resolveDocVersion(delete); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java index 3b635c82387..b2e6f98f126 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/AbstractBulkByScrollRequest.java @@ -44,8 +44,8 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; public abstract class AbstractBulkByScrollRequest> extends ActionRequest { public static final int SIZE_ALL_MATCHES = -1; - static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5); - static final int DEFAULT_SCROLL_SIZE = 1000; + public static final TimeValue DEFAULT_SCROLL_TIMEOUT = timeValueMinutes(5); + public static final int DEFAULT_SCROLL_SIZE = 1000; public static final int AUTO_SLICES = 0; public static final String AUTO_SLICES_VALUE = "auto"; diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index 5beb86fae6b..7aa2c8a1b75 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -209,8 +209,8 @@ public class BulkByScrollTask extends CancellableTask { public static class StatusBuilder { private Integer sliceId = null; private Long total = null; - private Long updated = null; - private Long created = null; + private long updated = 0; // Not present during deleteByQuery + private long created = 0; // Not present during updateByQuery private Long deleted = null; private Integer batches = null; private Long versionConflicts = null; @@ -221,7 +221,7 @@ public class BulkByScrollTask extends CancellableTask { private Float requestsPerSecond = null; private String reasonCancelled = null; private TimeValue throttledUntil = null; - private List sliceStatuses = emptyList(); + private List sliceStatuses = new ArrayList<>(); public void setSliceId(Integer sliceId) { this.sliceId = sliceId; @@ -295,10 +295,14 @@ public class BulkByScrollTask extends CancellableTask { public void setSliceStatuses(List sliceStatuses) { if (sliceStatuses != null) { - this.sliceStatuses = sliceStatuses; + this.sliceStatuses.addAll(sliceStatuses); } } + public void addToSliceStatuses(StatusOrException statusOrException) { + this.sliceStatuses.add(statusOrException); + } + public Status buildStatus() { if (sliceStatuses.isEmpty()) { try { @@ -613,37 +617,20 @@ public class BulkByScrollTask extends CancellableTask { Token token = parser.currentToken(); String fieldName = parser.currentName(); ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser::getTokenLocation); - Integer sliceId = null; - Long total = null; - Long updated = null; - Long created = null; - Long deleted = null; - Integer batches = null; - Long versionConflicts = null; - Long noOps = null; - Long bulkRetries = null; - Long 
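/*
 * The point of widening these strategy and assertion methods to protected is that an Engine
 * subclass can now plug in its own primary-mode rules. A hypothetical sketch (the subclass
 * name and its semantics are illustrative, not part of this change):
 *
 *     public class FollowingEngine extends InternalEngine {
 *         @Override
 *         protected boolean assertPrimaryIncomingSequenceNumber(Engine.Operation.Origin origin, long seqNo) {
 *             // such an engine replays operations that already carry a sequence number,
 *             // so it inverts the stock assertion that primary ops arrive unassigned
 *             assert seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO : "expected an assigned seq_no but got " + seqNo;
 *             return true;
 *         }
 *     }
 */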
searchRetries = null; - TimeValue throttled = null; - Float requestsPerSecond = null; - String reasonCancelled = null; - TimeValue throttledUntil = null; - List sliceStatuses = new ArrayList<>(); + StatusBuilder builder = new StatusBuilder(); while ((token = parser.nextToken()) != Token.END_OBJECT) { if (token == Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token == Token.START_OBJECT) { if (fieldName.equals(Status.RETRIES_FIELD)) { - Tuple retries = - Status.RETRIES_PARSER.parse(parser, null); - bulkRetries = retries.v1(); - searchRetries = retries.v2(); + builder.setRetries(Status.RETRIES_PARSER.parse(parser, null)); } else { parser.skipChildren(); } } else if (token == Token.START_ARRAY) { if (fieldName.equals(Status.SLICES_FIELD)) { while ((token = parser.nextToken()) != Token.END_ARRAY) { - sliceStatuses.add(StatusOrException.fromXContent(parser)); + builder.addToSliceStatuses(StatusOrException.fromXContent(parser)); } } else { parser.skipChildren(); @@ -651,57 +638,47 @@ public class BulkByScrollTask extends CancellableTask { } else { // else if it is a value switch (fieldName) { case Status.SLICE_ID_FIELD: - sliceId = parser.intValue(); + builder.setSliceId(parser.intValue()); break; case Status.TOTAL_FIELD: - total = parser.longValue(); + builder.setTotal(parser.longValue()); break; case Status.UPDATED_FIELD: - updated = parser.longValue(); + builder.setUpdated(parser.longValue()); break; case Status.CREATED_FIELD: - created = parser.longValue(); + builder.setCreated(parser.longValue()); break; case Status.DELETED_FIELD: - deleted = parser.longValue(); + builder.setDeleted(parser.longValue()); break; case Status.BATCHES_FIELD: - batches = parser.intValue(); + builder.setBatches(parser.intValue()); break; case Status.VERSION_CONFLICTS_FIELD: - versionConflicts = parser.longValue(); + builder.setVersionConflicts(parser.longValue()); break; case Status.NOOPS_FIELD: - noOps = parser.longValue(); + builder.setNoops(parser.longValue()); break; case Status.THROTTLED_RAW_FIELD: - throttled = new TimeValue(parser.longValue(), TimeUnit.MILLISECONDS); + builder.setThrottled(parser.longValue()); break; case Status.REQUESTS_PER_SEC_FIELD: - requestsPerSecond = parser.floatValue(); - requestsPerSecond = requestsPerSecond == -1 ? 
Float.POSITIVE_INFINITY : requestsPerSecond; + builder.setRequestsPerSecond(parser.floatValue()); break; case Status.CANCELED_FIELD: - reasonCancelled = parser.text(); + builder.setReasonCancelled(parser.text()); break; case Status.THROTTLED_UNTIL_RAW_FIELD: - throttledUntil = new TimeValue(parser.longValue(), TimeUnit.MILLISECONDS); + builder.setThrottledUntil(parser.longValue()); break; default: break; } } } - if (sliceStatuses.isEmpty()) { - return - new Status( - sliceId, total, updated, created, deleted, batches, versionConflicts, noOps, bulkRetries, - searchRetries, throttled, requestsPerSecond, reasonCancelled, throttledUntil - ); - } else { - return new Status(sliceStatuses, reasonCancelled); - } - + return builder.buildStatus(); } @Override @@ -838,15 +815,15 @@ public class BulkByScrollTask extends CancellableTask { ); } - public boolean equalsWithoutSliceStatus(Object o) { + public boolean equalsWithoutSliceStatus(Object o, boolean includeUpdated, boolean includeCreated) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Status other = (Status) o; return Objects.equals(sliceId, other.sliceId) && total == other.total && - updated == other.updated && - created == other.created && + (!includeUpdated || updated == other.updated) && + (!includeCreated || created == other.created) && deleted == other.deleted && batches == other.batches && versionConflicts == other.versionConflicts && @@ -861,7 +838,7 @@ public class BulkByScrollTask extends CancellableTask { @Override public boolean equals(Object o) { - if (equalsWithoutSliceStatus(o)) { + if (equalsWithoutSliceStatus(o, true, true)) { return Objects.equals(sliceStatuses, ((Status) o).sliceStatuses); } else { return false; diff --git a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java index eb4fd59a7bc..71ffadc9303 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/UpdateByQueryRequest.java @@ -24,6 +24,9 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.TaskId; import java.io.IOException; @@ -34,16 +37,22 @@ import java.io.IOException; * representative set of subrequests. This is best-effort but better than {@linkplain ReindexRequest} because scripts can't change the * destination index and things. */ -public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest implements IndicesRequest.Replaceable { +public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest + implements IndicesRequest.Replaceable, ToXContentObject { /** * Ingest pipeline to set on index requests made by this action. */ private String pipeline; public UpdateByQueryRequest() { + this(new SearchRequest()); } - public UpdateByQueryRequest(SearchRequest search) { + public UpdateByQueryRequest(String... 
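/*
 * Client code no longer needs to build a SearchRequest by hand; a sketch of the new surface
 * (the constructor and setIndicesOptions appear in this change and its tests; setPipeline is
 * assumed from the pipeline field above, and the values are illustrative):
 *
 *     UpdateByQueryRequest request = new UpdateByQueryRequest("index1", "index2");
 *     request.setIndicesOptions(IndicesOptions.fromOptions(false, false, true, false));
 *     request.setPipeline("my-pipeline");
 */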
indices) { + this(new SearchRequest(indices)); + } + + UpdateByQueryRequest(SearchRequest search) { this(search, true); } @@ -59,8 +68,81 @@ public class UpdateByQueryRequest extends AbstractBulkIndexByScrollRequest segments(boolean verbose) { return getEngine().segments(verbose); } @@ -1933,6 +1955,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl if (store.tryIncRef()) { try { doCheckIndex(); + } catch (IOException e) { + store.markStoreCorrupted(e); + throw e; } finally { store.decRef(); } @@ -1976,18 +2001,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl return; } logger.warn("check index [failure]\n{}", os.bytes().utf8ToString()); - if ("fix".equals(checkIndexOnStartup)) { - if (logger.isDebugEnabled()) { - logger.debug("fixing index, writing new segments file ..."); - } - store.exorciseIndex(status); - if (logger.isDebugEnabled()) { - logger.debug("index fixed, wrote new segments file \"{}\"", status.segmentsFileName); - } - } else { - // only throw a failure if we are not going to fix the index - throw new IllegalStateException("index check failure but can't fix it"); - } + throw new IOException("index check failure"); } } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 85975bc68c8..470f03afc48 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -134,7 +134,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0 static final int VERSION_START = 0; static final int VERSION = VERSION_WRITE_THROWABLE; - static final String CORRUPTED = "corrupted_"; + // public is for test purposes + public static final String CORRUPTED = "corrupted_"; public static final Setting INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING = Setting.timeSetting("index.store.stats_refresh_interval", TimeValue.timeValueSeconds(10), Property.IndexScope); @@ -164,7 +165,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref final TimeValue refreshInterval = indexSettings.getValue(INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING); logger.debug("store stats are refreshed with refresh_interval [{}]", refreshInterval); ByteSizeCachingDirectory sizeCachingDir = new ByteSizeCachingDirectory(dir, refreshInterval); - this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", settings, shardId)); + this.directory = new StoreDirectory(sizeCachingDir, Loggers.getLogger("index.store.deletes", shardId)); this.shardLock = shardLock; this.onClose = onClose; @@ -360,18 +361,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref } } - /** - * Repairs the index using the previous returned status from {@link #checkIndex(PrintStream)}. 
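/*
 * With exorciseIndex gone there is no in-place repair: a failed startup check now surfaces as
 * corruption. A condensed sketch of the new flow, stitched together from the IndexShard hunks
 * above (the surrounding try block is illustrative):
 *
 *     try {
 *         doCheckIndex();                // now throws IOException("index check failure") instead of fixing
 *     } catch (IOException e) {
 *         store.markStoreCorrupted(e);   // persists a corrupted_<uuid> marker next to the index files
 *         throw e;                       // recovery fails and the shard stays unstarted
 *     }
 */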
- */ - public void exorciseIndex(CheckIndex.Status status) throws IOException { - metadataLock.writeLock().lock(); - try (CheckIndex checkIndex = new CheckIndex(directory)) { - checkIndex.exorciseIndex(status); - } finally { - metadataLock.writeLock().unlock(); - } - } - public StoreStats stats() throws IOException { ensureOpen(); return new StoreStats(directory.estimateSize()); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index 06e8a5734f6..ec05f0e30b0 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -173,10 +173,9 @@ public class PeerRecoverySourceService extends AbstractComponent implements Inde final RemoteRecoveryTargetHandler recoveryTarget = new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), transportService, request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime)); - handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt(), settings); + handler = new RecoverySourceHandler(shard, recoveryTarget, request, recoverySettings.getChunkSize().bytesAsInt()); return handler; } } } } - diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index 10f796e5e15..220abf43124 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,6 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.FutureUtils; @@ -116,13 +115,12 @@ public class RecoverySourceHandler { public RecoverySourceHandler(final IndexShard shard, RecoveryTargetHandler recoveryTarget, final StartRecoveryRequest request, - final int fileChunkSizeInBytes, - final Settings nodeSettings) { + final int fileChunkSizeInBytes) { this.shard = shard; this.recoveryTarget = recoveryTarget; this.request = request; this.shardId = this.request.shardId().id(); - this.logger = Loggers.getLogger(getClass(), nodeSettings, request.shardId(), "recover to " + request.targetNode().getName()); + this.logger = Loggers.getLogger(getClass(), request.shardId(), "recover to " + request.targetNode().getName()); this.chunkSizeInBytes = fileChunkSizeInBytes; this.response = new RecoveryResponse(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 1a772f0c3f8..e28b01c8a61 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -117,7 +117,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget this.cancellableThreads = new CancellableThreads(); 
this.recoveryId = idGenerator.incrementAndGet(); this.listener = listener; - this.logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + this.logger = Loggers.getLogger(getClass(), indexShard.shardId()); this.indexShard = indexShard; this.sourceNode = sourceNode; this.shardId = indexShard.shardId(); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 7c0513f9eeb..250ef8f2dd9 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -424,7 +424,6 @@ public class Node implements Closeable { threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, bigArrays, scriptModule.getScriptService(), client, metaStateService, engineFactoryProviders, indexStoreFactories); - Collection pluginComponents = pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService, scriptModule.getScriptService(), xContentRegistry, environment, nodeEnvironment, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 4dc765d0db1..75ef3c9199a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -60,7 +60,7 @@ public class AggregationPhase implements SearchPhase { } context.aggregations().aggregators(aggregators); if (!collectors.isEmpty()) { - Collector collector = BucketCollector.wrap(collectors); + Collector collector = MultiBucketCollector.wrap(collectors); ((BucketCollector)collector).preCollection(); if (context.getProfilers() != null) { collector = new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION, @@ -97,7 +97,7 @@ public class AggregationPhase implements SearchPhase { // optimize the global collector based execution if (!globals.isEmpty()) { - BucketCollector globalsCollector = BucketCollector.wrap(globals); + BucketCollector globalsCollector = MultiBucketCollector.wrap(globals); Query query = context.buildFilteredQuery(Queries.newMatchAllQuery()); try { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index 1e2e7332ab7..2ad76d8a2b4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -183,7 +183,7 @@ public abstract class AggregatorBase extends Aggregator { @Override public final void preCollection() throws IOException { List collectors = Arrays.asList(subAggregators); - collectableSubAggregators = BucketCollector.wrap(collectors); + collectableSubAggregators = MultiBucketCollector.wrap(collectors); doPreCollection(); collectableSubAggregators.preCollection(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java index 40e66bd9645..f2c8bf5e16e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/BucketCollector.java @@ -24,10 +24,6 @@ import 
org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.stream.StreamSupport; /** * A Collector that can collect data in separate buckets. @@ -54,61 +50,6 @@ public abstract class BucketCollector implements Collector { } }; - /** - * Wrap the given collectors into a single instance. - */ - public static BucketCollector wrap(Iterable collectorList) { - final BucketCollector[] collectors = - StreamSupport.stream(collectorList.spliterator(), false).toArray(size -> new BucketCollector[size]); - switch (collectors.length) { - case 0: - return NO_OP_COLLECTOR; - case 1: - return collectors[0]; - default: - return new BucketCollector() { - - @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { - List leafCollectors = new ArrayList<>(collectors.length); - for (BucketCollector c : collectors) { - leafCollectors.add(c.getLeafCollector(ctx)); - } - return LeafBucketCollector.wrap(leafCollectors); - } - - @Override - public void preCollection() throws IOException { - for (BucketCollector collector : collectors) { - collector.preCollection(); - } - } - - @Override - public void postCollection() throws IOException { - for (BucketCollector collector : collectors) { - collector.postCollection(); - } - } - - @Override - public boolean needsScores() { - for (BucketCollector collector : collectors) { - if (collector.needsScores()) { - return true; - } - } - return false; - } - - @Override - public String toString() { - return Arrays.toString(collectors); - } - }; - } - } - @Override public abstract LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java new file mode 100644 index 00000000000..a8a015ab545 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/MultiBucketCollector.java @@ -0,0 +1,207 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MultiCollector; +import org.apache.lucene.search.ScoreCachingWrappingScorer; +import org.apache.lucene.search.Scorer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +/** + * A {@link BucketCollector} which allows running a bucket collection with several + * {@link BucketCollector}s. 
It is similar to the {@link MultiCollector} except that the
+ * {@link #wrap} method filters out the {@link BucketCollector#NO_OP_COLLECTOR}s rather than
+ * the {@code null} ones.
+ */
+public class MultiBucketCollector extends BucketCollector {
+
+    /** See {@link #wrap(Iterable)}. */
+    public static BucketCollector wrap(BucketCollector... collectors) {
+        return wrap(Arrays.asList(collectors));
+    }
+
+    /**
+     * Wraps a list of {@link BucketCollector}s with a {@link MultiBucketCollector}. This
+     * method works as follows:
+     * <ul>
+     * <li>Filters out the {@link BucketCollector#NO_OP_COLLECTOR} collectors, so they are not used
+     * during search time.
+     * <li>If the input contains 1 real collector, it is returned.
+     * <li>Otherwise the method returns a {@link MultiBucketCollector} which wraps the
+     * non-{@link BucketCollector#NO_OP_COLLECTOR} collectors.
+     * </ul>
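+     * <p>For example, {@code wrap(a, NO_OP_COLLECTOR, b)} collects into {@code a} and {@code b}
+     * only, and {@code wrap(NO_OP_COLLECTOR)} returns {@link BucketCollector#NO_OP_COLLECTOR}
+     * itself ({@code a} and {@code b} stand in for any real collectors). Note that {@code null}
+     * collectors are not filtered out and are not allowed.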
+ */ + public static BucketCollector wrap(Iterable collectors) { + // For the user's convenience, we allow NO_OP collectors to be passed. + // However, to improve performance, these null collectors are found + // and dropped from the array we save for actual collection time. + int n = 0; + for (BucketCollector c : collectors) { + if (c != NO_OP_COLLECTOR) { + n++; + } + } + + if (n == 0) { + return NO_OP_COLLECTOR; + } else if (n == 1) { + // only 1 Collector - return it. + BucketCollector col = null; + for (BucketCollector c : collectors) { + if (c != null) { + col = c; + break; + } + } + return col; + } else { + BucketCollector[] colls = new BucketCollector[n]; + n = 0; + for (BucketCollector c : collectors) { + if (c != null) { + colls[n++] = c; + } + } + return new MultiBucketCollector(colls); + } + } + + private final boolean cacheScores; + private final BucketCollector[] collectors; + + private MultiBucketCollector(BucketCollector... collectors) { + this.collectors = collectors; + int numNeedsScores = 0; + for (BucketCollector collector : collectors) { + if (collector.needsScores()) { + numNeedsScores += 1; + } + } + this.cacheScores = numNeedsScores >= 2; + } + + @Override + public void preCollection() throws IOException { + for (BucketCollector collector : collectors) { + collector.preCollection(); + } + } + + @Override + public void postCollection() throws IOException { + for (BucketCollector collector : collectors) { + collector.postCollection(); + } + } + + @Override + public boolean needsScores() { + for (BucketCollector collector : collectors) { + if (collector.needsScores()) { + return true; + } + } + return false; + } + + @Override + public String toString() { + return Arrays.toString(collectors); + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext context) throws IOException { + final List leafCollectors = new ArrayList<>(); + for (BucketCollector collector : collectors) { + final LeafBucketCollector leafCollector; + try { + leafCollector = collector.getLeafCollector(context); + } catch (CollectionTerminatedException e) { + // this leaf collector does not need this segment + continue; + } + leafCollectors.add(leafCollector); + } + switch (leafCollectors.size()) { + case 0: + throw new CollectionTerminatedException(); + case 1: + return leafCollectors.get(0); + default: + return new MultiLeafBucketCollector(leafCollectors, cacheScores); + } + } + + private static class MultiLeafBucketCollector extends LeafBucketCollector { + + private final boolean cacheScores; + private final LeafBucketCollector[] collectors; + private int numCollectors; + + private MultiLeafBucketCollector(List collectors, boolean cacheScores) { + this.collectors = collectors.toArray(new LeafBucketCollector[collectors.size()]); + this.cacheScores = cacheScores; + this.numCollectors = this.collectors.length; + } + + @Override + public void setScorer(Scorer scorer) throws IOException { + if (cacheScores) { + scorer = new ScoreCachingWrappingScorer(scorer); + } + for (int i = 0; i < numCollectors; ++i) { + final LeafCollector c = collectors[i]; + c.setScorer(scorer); + } + } + + private void removeCollector(int i) { + System.arraycopy(collectors, i + 1, collectors, i, numCollectors - i - 1); + --numCollectors; + collectors[numCollectors] = null; + } + + @Override + public void collect(int doc, long bucket) throws IOException { + final LeafBucketCollector[] collectors = this.collectors; + int numCollectors = this.numCollectors; + for (int i = 0; i < numCollectors; ) { + final 
LeafBucketCollector collector = collectors[i]; + try { + collector.collect(doc, bucket); + ++i; + } catch (CollectionTerminatedException e) { + removeCollector(i); + numCollectors = this.numCollectors; + if (numCollectors == 0) { + throw new CollectionTerminatedException(); + } + } + } + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index d6be0f57866..6ebf9e3c41c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -90,7 +91,7 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { /** Set the deferred collectors. */ @Override public void setDeferredCollector(Iterable deferredCollectors) { - this.collector = BucketCollector.wrap(deferredCollectors); + this.collector = MultiBucketCollector.wrap(deferredCollectors); } private void finishLeaf() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java index 0ff5ea12b97..b4e2243f17a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferableBucketAggregator.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BucketCollector; +import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.internal.SearchContext; @@ -59,7 +60,7 @@ public abstract class DeferableBucketAggregator extends BucketsAggregator { recordingWrapper.setDeferredCollector(deferredCollectors); collectors.add(recordingWrapper); } - collectableSubAggregators = BucketCollector.wrap(collectors); + collectableSubAggregators = MultiBucketCollector.wrap(collectors); } public static boolean descendsFromGlobalAggregator(Aggregator parent) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java index f357e9d286f..5653bc58f2a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; 
import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -61,7 +62,7 @@ public class MergingBucketsDeferringCollector extends DeferringBucketCollector { @Override public void setDeferredCollector(Iterable deferredCollectors) { - this.collector = BucketCollector.wrap(deferredCollectors); + this.collector = MultiBucketCollector.wrap(deferredCollectors); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java index ab529ac033e..97c535f56c6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -93,7 +94,7 @@ final class CompositeAggregator extends BucketsAggregator { @Override protected void doPreCollection() throws IOException { List collectors = Arrays.asList(subAggregators); - deferredCollectors = BucketCollector.wrap(collectors); + deferredCollectors = MultiBucketCollector.wrap(collectors); collectableSubAggregators = BucketCollector.NO_OP_COLLECTOR; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java index 05d9402230d..bb89173e767 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/BestDocsDeferringCollector.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.MultiBucketCollector; import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector; import java.io.IOException; @@ -76,7 +77,7 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme /** Set the deferred collectors. 
*/ @Override public void setDeferredCollector(Iterable deferredCollectors) { - this.deferred = BucketCollector.wrap(deferredCollectors); + this.deferred = MultiBucketCollector.wrap(deferredCollectors); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java index f0e9a57f7f3..39f04c6b7b0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/MetaDataIndexTemplateServiceTests.java @@ -69,7 +69,7 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase { containsString("Failed to parse value [0] for setting [index.number_of_shards] must be >= 1")); assertThat(throwables.get(0).getMessage(), containsString("unknown value for [index.shard.check_on_startup] " + - "must be one of [true, false, fix, checksum] but was: blargh")); + "must be one of [true, false, checksum] but was: blargh")); } public void testIndexTemplateValidationAccumulatesValidationErrors() { diff --git a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java index 2d097366a27..a4927ebc548 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/LuceneChangesSnapshotTests.java @@ -191,6 +191,7 @@ public class LuceneChangesSnapshotTests extends EngineTestCase { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33344") public void testUpdateAndReadChangesConcurrently() throws Exception { Follower[] followers = new Follower[between(1, 3)]; CountDownLatch readyLatch = new CountDownLatch(followers.length + 1); diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java index 0dd4d6bc849..71aab8ca9f9 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollResponseTests.java @@ -24,11 +24,15 @@ import org.elasticsearch.Version; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; +import org.elasticsearch.index.reindex.BulkByScrollTask.Status; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; @@ -38,6 +42,9 @@ import static org.hamcrest.Matchers.containsString; public class BulkByScrollResponseTests extends AbstractXContentTestCase { + private boolean includeUpdated; + private boolean includeCreated; + public void testRountTrip() throws IOException { BulkByScrollResponse response = new BulkByScrollResponse(timeValueMillis(randomNonNegativeLong()), BulkByScrollTaskStatusTests.randomStatus(), randomIndexingFailures(), randomSearchFailures(), randomBoolean()); @@ -97,10 +104,11 @@ 
public class BulkByScrollResponseTests extends AbstractXContentTestCase params = new HashMap<>(); + if (randomBoolean()) { + includeUpdated = false; + params.put(Status.INCLUDE_UPDATED, "false"); + } else { + includeUpdated = true; + } + if (randomBoolean()) { + includeCreated = false; + params.put(Status.INCLUDE_CREATED, "false"); + } else { + includeCreated = true; + } + return new ToXContent.MapParams(params); + } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java index 33c56bacd91..0d84b0e1412 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusOrExceptionTests.java @@ -55,11 +55,14 @@ public class BulkByScrollTaskStatusOrExceptionTests extends AbstractXContentTest return StatusOrException.fromXContent(parser); } - public static void assertEqualStatusOrException(StatusOrException expected, StatusOrException actual) { + public static void assertEqualStatusOrException(StatusOrException expected, StatusOrException actual, + boolean includeUpdated, boolean includeCreated) { if (expected != null && actual != null) { assertNotSame(expected, actual); if (expected.getException() == null) { - BulkByScrollTaskStatusTests.assertEqualStatus(expected.getStatus(), actual.getStatus()); + BulkByScrollTaskStatusTests + // we test includeCreated params in the Status tests + .assertEqualStatus(expected.getStatus(), actual.getStatus(), includeUpdated, includeCreated); } else { assertThat( actual.getException().getMessage(), @@ -74,7 +77,7 @@ public class BulkByScrollTaskStatusOrExceptionTests extends AbstractXContentTest @Override protected void assertEqualInstances(StatusOrException expected, StatusOrException actual) { - assertEqualStatusOrException(expected, actual); + assertEqualStatusOrException(expected, actual, true, true); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java index 368e1b3bdac..13db9f4766e 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/BulkByScrollTaskStatusTests.java @@ -33,7 +33,9 @@ import org.hamcrest.Matchers; import org.elasticsearch.index.reindex.BulkByScrollTask.Status; import java.io.IOException; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.stream.IntStream; @@ -44,6 +46,10 @@ import static org.apache.lucene.util.TestUtil.randomSimpleString; import static org.hamcrest.Matchers.equalTo; public class BulkByScrollTaskStatusTests extends AbstractXContentTestCase { + + private boolean includeUpdated; + private boolean includeCreated; + public void testBulkByTaskStatus() throws IOException { BulkByScrollTask.Status status = randomStatus(); BytesStreamOutput out = new BytesStreamOutput(); @@ -144,21 +150,21 @@ public class BulkByScrollTaskStatusTests extends AbstractXContentTestCase params = new HashMap<>(); + if (randomBoolean()) { + includeUpdated = false; + params.put(Status.INCLUDE_UPDATED, "false"); + } else { + includeUpdated = true; + } + if (randomBoolean()) { + includeCreated = false; + 
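/*
 * The INCLUDE_UPDATED / INCLUDE_CREATED params let a caller drop fields that a given request
 * type never reports. A sketch of the serialization side these tests randomize (the status and
 * builder instances are illustrative):
 *
 *     Map<String, String> params = new HashMap<>();
 *     params.put(BulkByScrollTask.Status.INCLUDE_UPDATED, "false"); // e.g. delete-by-query has no "updated"
 *     status.toXContent(builder, new ToXContent.MapParams(params));
 */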
params.put(Status.INCLUDE_CREATED, "false"); + } else { + includeCreated = true; + } + return new ToXContent.MapParams(params); + } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java b/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java index b30968cf056..47449eb7391 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/UpdateByQueryRequestTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import static org.apache.lucene.util.TestUtil.randomSimpleString; @@ -32,11 +31,11 @@ public class UpdateByQueryRequestTests extends AbstractBulkByScrollRequestTestCa indices[i] = randomSimpleString(random(), 1, 30); } - SearchRequest searchRequest = new SearchRequest(indices); IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); - searchRequest.indicesOptions(indicesOptions); - UpdateByQueryRequest request = new UpdateByQueryRequest(searchRequest); + UpdateByQueryRequest request = new UpdateByQueryRequest(); + request.indices(indices); + request.setIndicesOptions(indicesOptions); for (int i = 0; i < numIndices; i++) { assertEquals(indices[i], request.indices()[i]); } @@ -60,7 +59,7 @@ public class UpdateByQueryRequestTests extends AbstractBulkByScrollRequestTestCa @Override protected UpdateByQueryRequest newRequest() { - return new UpdateByQueryRequest(new SearchRequest(randomAlphaOfLength(5))); + return new UpdateByQueryRequest(randomAlphaOfLength(5)); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 50f95bf4d47..876e8fa4255 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.Term; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.TermQuery; @@ -118,6 +119,7 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotInfo; import org.elasticsearch.snapshots.SnapshotShardFailure; +import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.FieldMaskingReader; import org.elasticsearch.test.VersionUtils; @@ -126,7 +128,11 @@ import org.elasticsearch.ElasticsearchException; import java.io.IOException; import java.nio.charset.Charset; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -1239,7 +1245,7 @@ public class IndexShardTests extends IndexShardTestCase { }; try (Store store = createStore(shardId, new IndexSettings(metaData, Settings.EMPTY), directory)) { - IndexShard shard = newShard(shardRouting, shardPath, metaData, 
store, + IndexShard shard = newShard(shardRouting, shardPath, metaData, i -> store, null, new InternalEngineFactory(), () -> { }, EMPTY_EVENT_LISTENER); AtomicBoolean failureCallbackTriggered = new AtomicBoolean(false); @@ -2590,6 +2596,144 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(newShard); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33345") + public void testIndexCheckOnStartup() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final Path indexPath = corruptIndexFile(shardPath); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor corruptedVisitor = new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + + assertThat("corruption marker should not be there", corruptedMarkerCount.get(), equalTo(0)); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE + ); + // start shard and perform index check on startup. It enforce shard to fail due to corrupted index files + final IndexMetaData indexMetaData = IndexMetaData.builder(indexShard.indexSettings().getIndexMetaData()) + .settings(Settings.builder() + .put(indexShard.indexSettings.getSettings()) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("true", "checksum"))) + .build(); + + IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException indexShardRecoveryException = + expectThrows(IndexShardRecoveryException.class, () -> newStartedShard(p -> corruptedShard, true)); + assertThat(indexShardRecoveryException.getMessage(), equalTo("failed recovery")); + + // check that corrupt marker is there + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + try { + closeShards(corruptedShard); + } catch (RuntimeException e) { + assertThat(e.getMessage(), equalTo("CheckIndex failed")); + } + } + + public void testShardDoesNotStartIfCorruptedMarkerIsPresent() throws Exception { + final IndexShard indexShard = newStartedShard(true); + + final long numDocs = between(10, 100); + for (long i = 0; i < numDocs; i++) { + indexDoc(indexShard, "_doc", Long.toString(i), "{}"); + } + indexShard.flush(new FlushRequest()); + closeShards(indexShard); + + final ShardPath shardPath = indexShard.shardPath(); + + final ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(indexShard.routingEntry(), + RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE + ); + final IndexMetaData indexMetaData = indexShard.indexSettings().getIndexMetaData(); + + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + + // create corrupted marker + final String corruptionMessage = 
"fake ioexception"; + try(Store store = createStore(indexShard.indexSettings(), shardPath)) { + store.markStoreCorrupted(new IOException(corruptionMessage)); + } + + // try to start shard on corrupted files + final IndexShard corruptedShard = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException exception1 = expectThrows(IndexShardRecoveryException.class, + () -> newStartedShard(p -> corruptedShard, true)); + assertThat(exception1.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard); + + final AtomicInteger corruptedMarkerCount = new AtomicInteger(); + final SimpleFileVisitor corruptedVisitor = new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().startsWith(Store.CORRUPTED)) { + corruptedMarkerCount.incrementAndGet(); + } + return FileVisitResult.CONTINUE; + } + }; + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store has to be marked as corrupted", corruptedMarkerCount.get(), equalTo(1)); + + // try to start another time shard on corrupted files + final IndexShard corruptedShard2 = newShard(shardRouting, shardPath, indexMetaData, + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + + final IndexShardRecoveryException exception2 = expectThrows(IndexShardRecoveryException.class, + () -> newStartedShard(p -> corruptedShard2, true)); + assertThat(exception2.getCause().getMessage(), equalTo(corruptionMessage + " (resource=preexisting_corruption)")); + closeShards(corruptedShard2); + + // check that corrupt marker is there + corruptedMarkerCount.set(0); + Files.walkFileTree(indexPath, corruptedVisitor); + assertThat("store still has a single corrupt marker", corruptedMarkerCount.get(), equalTo(1)); + } + + private Path corruptIndexFile(ShardPath shardPath) throws IOException { + final Path indexPath = shardPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME); + final Path[] filesToCorrupt = + Files.walk(indexPath) + .filter(p -> { + final String name = p.getFileName().toString(); + return Files.isRegularFile(p) + && name.startsWith("extra") == false // Skip files added by Lucene's ExtrasFS + && IndexWriter.WRITE_LOCK_NAME.equals(name) == false + && name.startsWith("segments_") == false && name.endsWith(".si") == false; + }) + .toArray(Path[]::new); + CorruptionUtils.corruptFile(random(), filesToCorrupt); + return indexPath; + } + /** * Simulates a scenario that happens when we are async fetching snapshot metadata from GatewayService * and checking index concurrently. This should always be possible without any exception. 
@@ -2613,7 +2757,7 @@ public class IndexShardTests extends IndexShardTestCase { final IndexMetaData indexMetaData = IndexMetaData.builder(indexShard.indexSettings().getIndexMetaData()) .settings(Settings.builder() .put(indexShard.indexSettings.getSettings()) - .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum", "fix"))) + .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum"))) .build(); final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, null, null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index 29b16ca28f4..36d52d4475b 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -125,6 +125,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase { closeShards(shard); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33330") public void testSyncerOnClosingShard() throws Exception { IndexShard shard = newStartedShard(true); AtomicBoolean syncActionCalled = new AtomicBoolean(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 0351111c305..d0074791bfa 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -108,7 +108,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { final StartRecoveryRequest request = getStartRecoveryRequest(); Store store = newStore(createTempDir()); RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, - recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY); + recoverySettings.getChunkSize().bytesAsInt()); Directory dir = store.directory(); RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig()); int numDocs = randomIntBetween(10, 100); @@ -174,7 +174,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { when(shard.state()).thenReturn(IndexShardState.STARTED); final RecoveryTargetHandler recoveryTarget = mock(RecoveryTargetHandler.class); final RecoverySourceHandler handler = - new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes, Settings.EMPTY); + new RecoverySourceHandler(shard, recoveryTarget, request, fileChunkSizeInBytes); final List operations = new ArrayList<>(); final int initialNumberOfDocs = randomIntBetween(16, 64); for (int i = 0; i < initialNumberOfDocs; i++) { @@ -281,7 +281,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { Store store = newStore(tempDir, false); AtomicBoolean failedEngine = new AtomicBoolean(false); RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, - recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) { + recoverySettings.getChunkSize().bytesAsInt()) { @Override protected void failEngine(IOException cause) { assertFalse(failedEngine.get()); @@ -340,7 +340,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { Store store = newStore(tempDir, false); AtomicBoolean failedEngine = 
new AtomicBoolean(false); RecoverySourceHandler handler = new RecoverySourceHandler(null, null, request, - recoverySettings.getChunkSize().bytesAsInt(), Settings.EMPTY) { + recoverySettings.getChunkSize().bytesAsInt()) { @Override protected void failEngine(IOException cause) { assertFalse(failedEngine.get()); @@ -405,11 +405,10 @@ public class RecoverySourceHandlerTests extends ESTestCase { final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean(); final AtomicBoolean phase2Called = new AtomicBoolean(); final RecoverySourceHandler handler = new RecoverySourceHandler( - shard, - mock(RecoveryTargetHandler.class), - request, - recoverySettings.getChunkSize().bytesAsInt(), - Settings.EMPTY) { + shard, + mock(RecoveryTargetHandler.class), + request, + recoverySettings.getChunkSize().bytesAsInt()) { @Override public void phase1(final IndexCommit snapshot, final Supplier translogOps) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java new file mode 100644 index 00000000000..f9abdeed50f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/MultiBucketCollectorTests.java @@ -0,0 +1,262 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +public class MultiBucketCollectorTests extends ESTestCase { + private static class FakeScorer extends Scorer { + float score; + int doc = -1; + + FakeScorer() { + super(null); + } + + @Override + public int docID() { + return doc; + } + + @Override + public float score() { + return score; + } + + @Override + public DocIdSetIterator iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public Weight getWeight() { + throw new UnsupportedOperationException(); + } + + @Override + public Collection getChildren() { + throw new UnsupportedOperationException(); + } + } + + private static class TerminateAfterBucketCollector extends BucketCollector { + + private int count = 0; + private final int terminateAfter; + private final BucketCollector in; + + TerminateAfterBucketCollector(BucketCollector in, int terminateAfter) { + this.in = in; + this.terminateAfter = terminateAfter; + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext context) throws IOException { + if (count >= terminateAfter) { + throw new CollectionTerminatedException(); + } + final LeafBucketCollector leafCollector = in.getLeafCollector(context); + return new LeafBucketCollectorBase(leafCollector, null) { + @Override + public void collect(int doc, long bucket) throws IOException { + if (count >= terminateAfter) { + throw new CollectionTerminatedException(); + } + super.collect(doc, bucket); + count++; + } + }; + } + + @Override + public boolean needsScores() { + return false; + } + + @Override + public void preCollection() {} + + @Override + public void postCollection() {} + } + + private static class TotalHitCountBucketCollector extends BucketCollector { + + private int count = 0; + + TotalHitCountBucketCollector() { + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext context) { + return new LeafBucketCollector() { + @Override + public void collect(int doc, long bucket) throws IOException { + count++; + } + }; + } + + @Override + public boolean needsScores() { + return false; + } + + @Override + public void preCollection() {} + + @Override + public void postCollection() {} + + int getTotalHits() { + return count; + } + } + + private static class SetScorerBucketCollector extends BucketCollector { + private final BucketCollector in; + private final AtomicBoolean setScorerCalled; + + SetScorerBucketCollector(BucketCollector in, AtomicBoolean setScorerCalled) { + this.in = in; + this.setScorerCalled = setScorerCalled; + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext context) throws IOException { + final LeafBucketCollector 
leafCollector = in.getLeafCollector(context); + return new LeafBucketCollectorBase(leafCollector, null) { + @Override + public void setScorer(Scorer scorer) throws IOException { + super.setScorer(scorer); + setScorerCalled.set(true); + } + }; + } + + @Override + public boolean needsScores() { + return false; + } + + @Override + public void preCollection() {} + + @Override + public void postCollection() {} + } + + public void testCollectionTerminatedExceptionHandling() throws IOException { + final int iters = atLeast(3); + for (int iter = 0; iter < iters; ++iter) { + Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir); + final int numDocs = randomIntBetween(100, 1000); + final Document doc = new Document(); + for (int i = 0; i < numDocs; ++i) { + w.addDocument(doc); + } + final IndexReader reader = w.getReader(); + w.close(); + final IndexSearcher searcher = newSearcher(reader); + Map<TotalHitCountBucketCollector, Integer> expectedCounts = new HashMap<>(); + List<BucketCollector> collectors = new ArrayList<>(); + final int numCollectors = randomIntBetween(1, 5); + for (int i = 0; i < numCollectors; ++i) { + final int terminateAfter = random().nextInt(numDocs + 10); + final int expectedCount = terminateAfter > numDocs ? numDocs : terminateAfter; + TotalHitCountBucketCollector collector = new TotalHitCountBucketCollector(); + expectedCounts.put(collector, expectedCount); + collectors.add(new TerminateAfterBucketCollector(collector, terminateAfter)); + } + searcher.search(new MatchAllDocsQuery(), MultiBucketCollector.wrap(collectors)); + for (Map.Entry<TotalHitCountBucketCollector, Integer> expectedCount : expectedCounts.entrySet()) { + assertEquals(expectedCount.getValue().intValue(), expectedCount.getKey().getTotalHits()); + } + reader.close(); + dir.close(); + } + } + + public void testSetScorerAfterCollectionTerminated() throws IOException { + BucketCollector collector1 = new TotalHitCountBucketCollector(); + BucketCollector collector2 = new TotalHitCountBucketCollector(); + + AtomicBoolean setScorerCalled1 = new AtomicBoolean(); + collector1 = new SetScorerBucketCollector(collector1, setScorerCalled1); + + AtomicBoolean setScorerCalled2 = new AtomicBoolean(); + collector2 = new SetScorerBucketCollector(collector2, setScorerCalled2); + + collector1 = new TerminateAfterBucketCollector(collector1, 1); + collector2 = new TerminateAfterBucketCollector(collector2, 2); + + Scorer scorer = new FakeScorer(); + + List<BucketCollector> collectors = Arrays.asList(collector1, collector2); + Collections.shuffle(collectors, random()); + BucketCollector collector = MultiBucketCollector.wrap(collectors); + + LeafBucketCollector leafCollector = collector.getLeafCollector(null); + leafCollector.setScorer(scorer); + assertTrue(setScorerCalled1.get()); + assertTrue(setScorerCalled2.get()); + + leafCollector.collect(0); + leafCollector.collect(1); + + setScorerCalled1.set(false); + setScorerCalled2.set(false); + leafCollector.setScorer(scorer); + assertFalse(setScorerCalled1.get()); + assertTrue(setScorerCalled2.get()); + + expectThrows(CollectionTerminatedException.class, () -> { + leafCollector.collect(1); + }); + + setScorerCalled1.set(false); + setScorerCalled2.set(false); + leafCollector.setScorer(scorer); + assertFalse(setScorerCalled1.get()); + assertFalse(setScorerCalled2.get()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 2f4a3dfd6c1..32db9bf0a2a 100644 ---
a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.uid.Versions; @@ -69,7 +70,6 @@ import org.elasticsearch.indices.recovery.RecoverySourceHandler; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryTarget; import org.elasticsearch.indices.recovery.StartRecoveryRequest; -import org.elasticsearch.node.Node; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.Repository; import org.elasticsearch.snapshots.Snapshot; @@ -156,7 +156,6 @@ public abstract class IndexShardTestCase extends ESTestCase { return Settings.EMPTY; } - protected Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException { return createStore(shardPath.getShardId(), indexSettings, newFSDirectory(shardPath.resolveIndex())); } @@ -169,7 +168,6 @@ public abstract class IndexShardTestCase extends ESTestCase { } }; return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); - } /** @@ -179,7 +177,17 @@ public abstract class IndexShardTestCase extends ESTestCase { * another shard) */ protected IndexShard newShard(boolean primary) throws IOException { - return newShard(primary, Settings.EMPTY, new InternalEngineFactory()); + return newShard(primary, Settings.EMPTY); + } + + /** + * Creates a new initializing shard. The shard will have its own unique data path. + * + * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) + */ + protected IndexShard newShard(final boolean primary, final Settings settings) throws IOException { + return newShard(primary, settings, new InternalEngineFactory()); + } + + /** @@ -318,23 +326,25 @@ public abstract class IndexShardTestCase extends ESTestCase { * @param routing shard routing to use * @param shardPath path to use for shard data * @param indexMetaData indexMetaData for the shard, including any mapping - * @param store an optional custom store to use. If null a default file based store will be created + * @param storeProvider an optional custom store provider to use. If null a default file based store will be created * @param indexSearcherWrapper an optional wrapper to be used during searches * @param globalCheckpointSyncer callback for syncing global checkpoints * @param indexEventListener index event listener * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData, - @Nullable Store store, @Nullable IndexSearcherWrapper indexSearcherWrapper, + @Nullable CheckedFunction<IndexSettings, Store, IOException> storeProvider, + @Nullable IndexSearcherWrapper indexSearcherWrapper, @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, IndexEventListener indexEventListener, IndexingOperationListener...
listeners) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings); final IndexShard indexShard; - if (store == null) { - store = createStore(indexSettings, shardPath); + if (storeProvider == null) { + storeProvider = is -> createStore(is, shardPath); } + final Store store = storeProvider.apply(indexSettings); boolean success = false; try { IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null); @@ -424,7 +434,18 @@ public abstract class IndexShardTestCase extends ESTestCase { */ protected IndexShard newStartedShard( final boolean primary, final Settings settings, final EngineFactory engineFactory) throws IOException { - IndexShard shard = newShard(primary, settings, engineFactory); + return newStartedShard(p -> newShard(p, settings, engineFactory), primary); + } + + /** + * creates a new empty shard and starts it. + * + * @param shardFunction shard factory function + * @param primary controls whether the shard will be a primary or a replica. + */ + protected IndexShard newStartedShard(CheckedFunction<Boolean, IndexShard, IOException> shardFunction, + boolean primary) throws IOException { + IndexShard shard = shardFunction.apply(primary); if (primary) { recoverShardFromStore(shard); } else { @@ -550,11 +571,10 @@ public abstract class IndexShardTestCase extends ESTestCase { final StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), targetAllocationId, pNode, rNode, snapshot, replica.routingEntry().primary(), 0, startingSeqNo); final RecoverySourceHandler recovery = new RecoverySourceHandler( - primary, - recoveryTarget, - request, - (int) ByteSizeUnit.MB.toBytes(1), - Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), pNode.getName()).build()); + primary, + recoveryTarget, + request, + (int) ByteSizeUnit.MB.toBytes(1)); primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable, Collections.emptySet()); recovery.recoverToTarget(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 8a22383dcae..777f790d2dd 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -101,7 +101,7 @@ public class MockFSIndexStore extends IndexStore { if (indexShard != null) { Boolean remove = shardSet.remove(indexShard); if (remove == Boolean.TRUE) { - Logger logger = Loggers.getLogger(getClass(), indexShard.indexSettings().getSettings(), indexShard.shardId()); + Logger logger = Loggers.getLogger(getClass(), indexShard.shardId()); MockFSDirectoryService.checkIndex(logger, indexShard.store(), indexShard.shardId()); } } diff --git a/x-pack/build.gradle b/x-pack/build.gradle index 0672474fdf3..7eca7c91fa3 100644 --- a/x-pack/build.gradle +++ b/x-pack/build.gradle @@ -41,6 +41,7 @@ subprojects { ] } + ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-ccr:${version}": xpackModule('ccr')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-core:${version}": xpackModule('core')] ext.projectSubstitutions += [ "org.elasticsearch.plugin:x-pack-deprecation:${version}": xpackModule('deprecation')] ext.projectSubstitutions += [
"org.elasticsearch.plugin:x-pack-graph:${version}": xpackModule('graph')] diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 99e62532e2d..f027493b0ab 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -14,17 +14,6 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/security/authorization/run-as-privilege.asciidoc', 'en/security/ccs-clients-integrations/http.asciidoc', 'en/security/authorization/custom-roles-provider.asciidoc', - 'en/rest-api/ml/delete-snapshot.asciidoc', - 'en/rest-api/ml/get-bucket.asciidoc', - 'en/rest-api/ml/get-job-stats.asciidoc', - 'en/rest-api/ml/get-overall-buckets.asciidoc', - 'en/rest-api/ml/get-category.asciidoc', - 'en/rest-api/ml/get-record.asciidoc', - 'en/rest-api/ml/get-influencer.asciidoc', - 'en/rest-api/ml/get-snapshot.asciidoc', - 'en/rest-api/ml/post-data.asciidoc', - 'en/rest-api/ml/revert-snapshot.asciidoc', - 'en/rest-api/ml/update-snapshot.asciidoc', 'en/rest-api/watcher/stats.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] diff --git a/x-pack/docs/en/ml/api-quickref.asciidoc b/x-pack/docs/en/ml/api-quickref.asciidoc deleted file mode 100644 index be74167862e..00000000000 --- a/x-pack/docs/en/ml/api-quickref.asciidoc +++ /dev/null @@ -1,102 +0,0 @@ -[role="xpack"] -[[ml-api-quickref]] -== API quick reference - -All {ml} endpoints have the following base: - -[source,js] ----- -/_xpack/ml/ ----- -// NOTCONSOLE - -The main {ml} resources can be accessed with a variety of endpoints: - -* <>: Create and manage {ml} jobs -* <>: Select data from {es} to be analyzed -* <>: Access the results of a {ml} job -* <>: Manage model snapshots -//* <>: Validate subsections of job configurations - -[float] -[[ml-api-jobs]] -=== /anomaly_detectors/ - -* {ref}/ml-put-job.html[PUT /anomaly_detectors/+++]: Create a job -* {ref}/ml-open-job.html[POST /anomaly_detectors//_open]: Open a job -* {ref}/ml-post-data.html[POST /anomaly_detectors//_data]: Send data to a job -* {ref}/ml-get-job.html[GET /anomaly_detectors]: List jobs -* {ref}/ml-get-job.html[GET /anomaly_detectors/+++]: Get job details -* {ref}/ml-get-job-stats.html[GET /anomaly_detectors//_stats]: Get job statistics -* {ref}/ml-update-job.html[POST /anomaly_detectors//_update]: Update certain properties of the job configuration -* {ref}/ml-flush-job.html[POST anomaly_detectors//_flush]: Force a job to analyze buffered data -* {ref}/ml-forecast.html[POST anomaly_detectors//_forecast]: Forecast future job behavior -* {ref}/ml-close-job.html[POST /anomaly_detectors//_close]: Close a job -* {ref}/ml-delete-job.html[DELETE /anomaly_detectors/+++]: Delete a job - -[float] -[[ml-api-calendars]] -=== /calendars/ - -* {ref}/ml-put-calendar.html[PUT /calendars/+++]: Create a calendar -* {ref}/ml-post-calendar-event.html[POST /calendars/+++/events]: Add a scheduled event to a calendar -* {ref}/ml-put-calendar-job.html[PUT /calendars/+++/jobs/+++]: Associate a job with a calendar -* {ref}/ml-get-calendar.html[GET /calendars/+++]: Get calendar details -* {ref}/ml-get-calendar-event.html[GET /calendars/+++/events]: Get scheduled event details -* {ref}/ml-delete-calendar-event.html[DELETE /calendars/+++/events/+++]: Remove a scheduled event from a calendar -* {ref}/ml-delete-calendar-job.html[DELETE /calendars/+++/jobs/+++]: Disassociate a job from a calendar -* {ref}/ml-delete-calendar.html[DELETE /calendars/+++]: Delete a calendar - -[float] -[[ml-api-filters]] -=== /filters/ - -* {ref}/ml-put-filter.html[PUT /filters/+++]: Create a filter -* 
{ref}/ml-update-filter.html[POST /filters/+++/_update]: Update a filter -* {ref}/ml-get-filter.html[GET /filters/+++]: List filters -* {ref}/ml-delete-filter.html[DELETE /filter/+++]: Delete a filter - -[float] -[[ml-api-datafeeds]] -=== /datafeeds/ - -* {ref}/ml-put-datafeed.html[PUT /datafeeds/+++]: Create a {dfeed} -* {ref}/ml-start-datafeed.html[POST /datafeeds//_start]: Start a {dfeed} -* {ref}/ml-get-datafeed.html[GET /datafeeds]: List {dfeeds} -* {ref}/ml-get-datafeed.html[GET /datafeeds/+++]: Get {dfeed} details -* {ref}/ml-get-datafeed-stats.html[GET /datafeeds//_stats]: Get statistical information for {dfeeds} -* {ref}/ml-preview-datafeed.html[GET /datafeeds//_preview]: Get a preview of a {dfeed} -* {ref}/ml-update-datafeed.html[POST /datafeeds//_update]: Update certain settings for a {dfeed} -* {ref}/ml-stop-datafeed.html[POST /datafeeds//_stop]: Stop a {dfeed} -* {ref}/ml-delete-datafeed.html[DELETE /datafeeds/+++]: Delete {dfeed} - -[float] -[[ml-api-results]] -=== /results/ - -* {ref}/ml-get-bucket.html[GET /results/buckets]: List the buckets in the results -* {ref}/ml-get-bucket.html[GET /results/buckets/+++]: Get bucket details -* {ref}/ml-get-overall-buckets.html[GET /results/overall_buckets]: Get overall bucket results for multiple jobs -* {ref}/ml-get-category.html[GET /results/categories]: List the categories in the results -* {ref}/ml-get-category.html[GET /results/categories/+++]: Get category details -* {ref}/ml-get-influencer.html[GET /results/influencers]: Get influencer details -* {ref}/ml-get-record.html[GET /results/records]: Get records from the results - -[float] -[[ml-api-snapshots]] -=== /model_snapshots/ - -* {ref}/ml-get-snapshot.html[GET /model_snapshots]: List model snapshots -* {ref}/ml-get-snapshot.html[GET /model_snapshots/+++]: Get model snapshot details -* {ref}/ml-revert-snapshot.html[POST /model_snapshots//_revert]: Revert a model snapshot -* {ref}/ml-update-snapshot.html[POST /model_snapshots//_update]: Update certain settings for a model snapshot -* {ref}/ml-delete-snapshot.html[DELETE /model_snapshots/+++]: Delete a model snapshot - -//// -[float] -[[ml-api-validate]] -=== /validate/ - -* {ref}/ml-valid-detector.html[POST /anomaly_detectors/_validate/detector]: Validate a detector -* {ref}/ml-valid-job.html[POST /anomaly_detectors/_validate]: Validate a job -//// diff --git a/x-pack/docs/en/rest-api/defs.asciidoc b/x-pack/docs/en/rest-api/defs.asciidoc deleted file mode 100644 index ed53929391b..00000000000 --- a/x-pack/docs/en/rest-api/defs.asciidoc +++ /dev/null @@ -1,36 +0,0 @@ -[role="xpack"] -[[ml-api-definitions]] -== Definitions - -These resource definitions are used in {ml} and {security} APIs and in {kib} -advanced {ml} job configuration options. 
- -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> -* <> - -[role="xpack"] -include::ml/calendarresource.asciidoc[] -[role="xpack"] -include::ml/datafeedresource.asciidoc[] -[role="xpack"] -include::ml/filterresource.asciidoc[] -[role="xpack"] -include::ml/jobresource.asciidoc[] -[role="xpack"] -include::ml/jobcounts.asciidoc[] -[role="xpack"] -include::security/role-mapping-resources.asciidoc[] -[role="xpack"] -include::ml/snapshotresource.asciidoc[] -[role="xpack"] -include::ml/resultsresource.asciidoc[] -[role="xpack"] -include::ml/eventresource.asciidoc[] diff --git a/x-pack/plugin/ccr/build.gradle b/x-pack/plugin/ccr/build.gradle new file mode 100644 index 00000000000..0b1f889a2c1 --- /dev/null +++ b/x-pack/plugin/ccr/build.gradle @@ -0,0 +1,60 @@ +import com.carrotsearch.gradle.junit4.RandomizedTestingTask +import org.elasticsearch.gradle.BuildPlugin + +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' +esplugin { + name 'x-pack-ccr' + description 'Elasticsearch Expanded Pack Plugin - CCR' + classname 'org.elasticsearch.xpack.ccr.Ccr' + hasNativeController false + requiresKeystore true + extendedPlugins = ['x-pack-core'] +} +archivesBaseName = 'x-pack-ccr' + +integTest.enabled = false + +compileJava.options.compilerArgs << "-Xlint:-try" +compileTestJava.options.compilerArgs << "-Xlint:-try" + +// Instead we create a separate task to run the +// tests based on ESIntegTestCase +task internalClusterTest(type: RandomizedTestingTask, + group: JavaBasePlugin.VERIFICATION_GROUP, + description: 'Java fantasy integration tests', + dependsOn: test.dependsOn) { + configure(BuildPlugin.commonTestConfig(project)) + classpath = project.test.classpath + testClassesDirs = project.test.testClassesDirs + include '**/*IT.class' + systemProperty 'es.set.netty.runtime.available.processors', 'false' +} + +check.dependsOn internalClusterTest +internalClusterTest.mustRunAfter test + +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + +dependencies { + compileOnly "org.elasticsearch:elasticsearch:${version}" + + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +dependencyLicenses { + ignoreSha 'x-pack-core' +} + +run { + plugin xpackModule('core') +} diff --git a/x-pack/plugin/ccr/qa/build.gradle b/x-pack/plugin/ccr/qa/build.gradle new file mode 100644 index 00000000000..4a007422f38 --- /dev/null +++ b/x-pack/plugin/ccr/qa/build.gradle @@ -0,0 +1,12 @@ + +/* Remove assemble on all qa projects because we don't need to publish + * artifacts for them. 
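 * The qa sub-projects stay wired into verification through the projectsEvaluated hook in
 * x-pack/plugin/ccr/build.gradle above, which adds each qa project's check task as a dependency of the main ccr check task.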
*/ +gradle.projectsEvaluated { + subprojects { + Task assemble = project.tasks.findByName('assemble') + if (assemble) { + project.tasks.remove(assemble) + project.build.dependsOn.remove('assemble') + } + } +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle new file mode 100644 index 00000000000..97d4008eb8c --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/build.gradle @@ -0,0 +1,40 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ccr'), configuration: 'runtime') +} + +task leaderClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +leaderClusterTestCluster { + numNodes = 1 + clusterName = 'leader-cluster' +} + +leaderClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'true' +} + +task followClusterTest(type: RestIntegTestTask) {} + +followClusterTestCluster { + dependsOn leaderClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + setting 'xpack.license.self_generated.type', 'trial' + setting 'search.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" +} + +followClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + finalizedBy 'leaderClusterTestCluster#stop' +} + +check.dependsOn followClusterTest +test.enabled = false diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java new file mode 100644 index 00000000000..06d9f91c7ab --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-incompatible-license/src/test/java/org/elasticsearch/xpack/ccr/CcrMultiClusterLicenseIT.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.util.Locale; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; + +public class CcrMultiClusterLicenseIT extends ESRestTestCase { + + private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + public void testFollowIndex() { + if (runningAgainstLeaderCluster == false) { + final Request request = new Request("POST", "/follower/_ccr/follow"); + request.setJsonEntity("{\"leader_index\": \"leader_cluster:leader\"}"); + assertLicenseIncompatible(request); + } + } + + public void testCreateAndFollowIndex() { + if (runningAgainstLeaderCluster == false) { + final Request request = new Request("POST", "/follower/_ccr/create_and_follow"); + request.setJsonEntity("{\"leader_index\": \"leader_cluster:leader\"}"); + assertLicenseIncompatible(request); + } + } + + private static void assertLicenseIncompatible(final Request request) { + final ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(request)); + final String expected = String.format( + Locale.ROOT, + "can not fetch remote index [%s] metadata as the remote cluster [%s] is not licensed for [ccr]; " + + "the license mode [BASIC] on cluster [%s] does not enable [ccr]", + "leader_cluster:leader", + "leader_cluster", + "leader_cluster"); + assertThat(e, hasToString(containsString(expected))); + } + +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle new file mode 100644 index 00000000000..897aed0110e --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/build.gradle @@ -0,0 +1,75 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ccr'), configuration: 'runtime') +} + +task leaderClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +leaderClusterTestCluster { + numNodes = 1 + clusterName = 'leader-cluster' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.monitoring.enabled', 'false' + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupTestAdmin', + 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" + setupCommand 'setupCcrUser', + 'bin/elasticsearch-users', 'useradd', "test_ccr", '-p', 'x-pack-test-password', '-r', "manage_ccr" + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +leaderClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'true' +} + +task followClusterTest(type: RestIntegTestTask) {} + +followClusterTestCluster { + dependsOn leaderClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + 
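+ // the follow cluster reaches the leader through the remote-cluster seed below; the lazy GString
+ // (${-> ...}) defers evaluation until the leader cluster is running and its transport address is known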
setting 'search.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.monitoring.enabled', 'false' + extraConfigFile 'roles.yml', 'roles.yml' + setupCommand 'setupTestAdmin', + 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" + setupCommand 'setupCcrUser', + 'bin/elasticsearch-users', 'useradd', "test_ccr", '-p', 'x-pack-test-password', '-r', "ccruser" + waitCondition = { node, ant -> + File tmpFile = new File(node.cwd, 'wait.success') + ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=>=${numNodes}&wait_for_status=yellow", + dest: tmpFile.toString(), + username: 'test_admin', + password: 'x-pack-test-password', + ignoreerrors: true, + retries: 10) + return tmpFile.exists() + } +} + +followClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + finalizedBy 'leaderClusterTestCluster#stop' +} + +check.dependsOn followClusterTest +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml b/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml new file mode 100644 index 00000000000..700a2416c66 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/roles.yml @@ -0,0 +1,9 @@ +ccruser: + cluster: + - manage_ccr + indices: + - names: [ 'allowed-index' ] + privileges: + - monitor + - read + - write diff --git a/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java new file mode 100644 index 00000000000..7d658550d92 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster-with-security/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexSecurityIT.java @@ -0,0 +1,194 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; + +public class FollowIndexSecurityIT extends ESRestTestCase { + + private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("test_ccr", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("test_admin", new SecureString("x-pack-test-password".toCharArray())); + return Settings.builder() + .put(ThreadContext.PREFIX + ".Authorization", token) + .build(); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + public void testFollowIndex() throws Exception { + final int numDocs = 16; + final String allowedIndex = "allowed-index"; + final String unallowedIndex = "unallowed-index"; + if (runningAgainstLeaderCluster) { + logger.info("Running against leader cluster"); + Settings indexSettings = Settings.builder().put("index.soft_deletes.enabled", true).build(); + createIndex(allowedIndex, indexSettings); + createIndex(unallowedIndex, indexSettings); + for (int i = 0; i < numDocs; i++) { + logger.info("Indexing doc [{}]", i); + index(allowedIndex, Integer.toString(i), "field", i); + } + for (int i = 0; i < numDocs; i++) { + logger.info("Indexing doc [{}]", i); + index(unallowedIndex, Integer.toString(i), "field", i); + } + refresh(allowedIndex); + verifyDocuments(adminClient(), allowedIndex, numDocs); + } else { + createAndFollowIndex("leader_cluster:" + allowedIndex, allowedIndex); + assertBusy(() -> verifyDocuments(client(), allowedIndex, numDocs)); + assertThat(countCcrNodeTasks(), equalTo(1)); + assertOK(client().performRequest(new Request("POST", "/" + allowedIndex + "/_ccr/unfollow"))); + // Make sure that there are no other ccr-related operations running: + assertBusy(() -> { + Map<String, Object> clusterState = toMap(adminClient().performRequest(new Request("GET", "/_cluster/state"))); + List<?> tasks = (List<?>) XContentMapValues.extractValue("metadata.persistent_tasks.tasks", clusterState); + assertThat(tasks.size(), equalTo(0)); + assertThat(countCcrNodeTasks(), equalTo(0)); + }); + + followIndex("leader_cluster:" + allowedIndex, allowedIndex); + assertThat(countCcrNodeTasks(), equalTo(1)); + assertOK(client().performRequest(new Request("POST", "/" +
allowedIndex + "/_ccr/unfollow"))); + // Make sure that there are no other ccr relates operations running: + assertBusy(() -> { + Map clusterState = toMap(adminClient().performRequest(new Request("GET", "/_cluster/state"))); + List tasks = (List) XContentMapValues.extractValue("metadata.persistent_tasks.tasks", clusterState); + assertThat(tasks.size(), equalTo(0)); + assertThat(countCcrNodeTasks(), equalTo(0)); + }); + + createAndFollowIndex("leader_cluster:" + unallowedIndex, unallowedIndex); + // Verify that nothing has been replicated and no node tasks are running + // These node tasks should have been failed due to the fact that the user + // has no sufficient priviledges. + assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); + verifyDocuments(adminClient(), unallowedIndex, 0); + + followIndex("leader_cluster:" + unallowedIndex, unallowedIndex); + assertBusy(() -> assertThat(countCcrNodeTasks(), equalTo(0))); + verifyDocuments(adminClient(), unallowedIndex, 0); + } + } + + private int countCcrNodeTasks() throws IOException { + final Request request = new Request("GET", "/_tasks"); + request.addParameter("detailed", "true"); + Map rsp1 = toMap(adminClient().performRequest(request)); + Map nodes = (Map) rsp1.get("nodes"); + assertThat(nodes.size(), equalTo(1)); + Map node = (Map) nodes.values().iterator().next(); + Map nodeTasks = (Map) node.get("tasks"); + int numNodeTasks = 0; + for (Map.Entry entry : nodeTasks.entrySet()) { + Map nodeTask = (Map) entry.getValue(); + String action = (String) nodeTask.get("action"); + if (action.startsWith("xpack/ccr/shard_follow_task")) { + numNodeTasks++; + } + } + return numNodeTasks; + } + + private static void index(String index, String id, Object... fields) throws IOException { + XContentBuilder document = jsonBuilder().startObject(); + for (int i = 0; i < fields.length; i += 2) { + document.field((String) fields[i], fields[i + 1]); + } + document.endObject(); + final Request request = new Request("POST", "/" + index + "/_doc/" + id); + request.setJsonEntity(Strings.toString(document)); + assertOK(adminClient().performRequest(request)); + } + + private static void refresh(String index) throws IOException { + assertOK(adminClient().performRequest(new Request("POST", "/" + index + "/_refresh"))); + } + + private static void followIndex(String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/follow"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + private static void createAndFollowIndex(String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/create_and_follow"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + void verifyDocuments(RestClient client, String index, int expectedNumDocs) throws IOException { + final Request request = new Request("GET", "/" + index + "/_search"); + request.addParameter("pretty", "true"); + request.addParameter("size", Integer.toString(expectedNumDocs)); + request.addParameter("sort", "field:asc"); + Map response = toMap(client.performRequest(request)); + + int numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertThat(numDocs, equalTo(expectedNumDocs)); + + List hits = (List) 
XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), equalTo(expectedNumDocs)); + for (int i = 0; i < expectedNumDocs; i++) { + int value = (int) XContentMapValues.extractValue("_source.field", (Map<?, ?>) hits.get(i)); + assertThat(i, equalTo(value)); + } + } + + private static Map<String, Object> toMap(Response response) throws IOException { + return toMap(EntityUtils.toString(response.getEntity())); + } + + private static Map<String, Object> toMap(String response) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); + } + + protected static void createIndex(String name, Settings settings) throws IOException { + createIndex(name, settings, ""); + } + + protected static void createIndex(String name, Settings settings, String mapping) throws IOException { + final Request request = new Request("PUT", "/" + name); + request.setJsonEntity("{ \"settings\": " + Strings.toString(settings) + ", \"mappings\" : {" + mapping + "} }"); + assertOK(adminClient().performRequest(request)); + } + +} diff --git a/x-pack/plugin/ccr/qa/multi-cluster/build.gradle b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle new file mode 100644 index 00000000000..cc726e1a652 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster/build.gradle @@ -0,0 +1,41 @@ +import org.elasticsearch.gradle.test.RestIntegTestTask + +apply plugin: 'elasticsearch.standalone-test' + +dependencies { + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') + testCompile project(path: xpackModule('ccr'), configuration: 'runtime') +} + +task leaderClusterTest(type: RestIntegTestTask) { + mustRunAfter(precommit) +} + +leaderClusterTestCluster { + numNodes = 1 + clusterName = 'leader-cluster' + setting 'xpack.license.self_generated.type', 'trial' +} + +leaderClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'true' +} + +task followClusterTest(type: RestIntegTestTask) {} + +followClusterTestCluster { + dependsOn leaderClusterTestRunner + numNodes = 1 + clusterName = 'follow-cluster' + setting 'xpack.license.self_generated.type', 'trial' + setting 'search.remote.leader_cluster.seeds', "\"${-> leaderClusterTest.nodes.get(0).transportUri()}\"" +} + +followClusterTestRunner { + systemProperty 'tests.is_leader_cluster', 'false' + systemProperty 'tests.leader_host', "${-> leaderClusterTest.nodes.get(0).httpUri()}" + finalizedBy 'leaderClusterTestCluster#stop' +} + +check.dependsOn followClusterTest +test.enabled = false // no unit tests for multi-cluster-search, only the rest integration test diff --git a/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java new file mode 100644 index 00000000000..c14e13e7bb0 --- /dev/null +++ b/x-pack/plugin/ccr/qa/multi-cluster/src/test/java/org/elasticsearch/xpack/ccr/FollowIndexIT.java @@ -0,0 +1,147 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ccr; + +import org.apache.http.HttpHost; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class FollowIndexIT extends ESRestTestCase { + + private final boolean runningAgainstLeaderCluster = Booleans.parseBoolean(System.getProperty("tests.is_leader_cluster")); + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + public void testFollowIndex() throws Exception { + final int numDocs = 128; + final String leaderIndexName = "test_index1"; + if (runningAgainstLeaderCluster) { + logger.info("Running against leader cluster"); + String mapping = ""; + if (randomBoolean()) { // randomly do source filtering on indexing + mapping = + "\"_doc\": {" + + " \"_source\": {" + + " \"includes\": [\"field\"]," + + " \"excludes\": [\"filtered_field\"]" + + " }"+ + "}"; + } + Settings indexSettings = Settings.builder() + .put("index.soft_deletes.enabled", true) + .build(); + createIndex(leaderIndexName, indexSettings, mapping); + for (int i = 0; i < numDocs; i++) { + logger.info("Indexing doc [{}]", i); + index(client(), leaderIndexName, Integer.toString(i), "field", i, "filtered_field", "true"); + } + refresh(leaderIndexName); + verifyDocuments(leaderIndexName, numDocs); + } else { + logger.info("Running against follow cluster"); + final String followIndexName = "test_index2"; + createAndFollowIndex("leader_cluster:" + leaderIndexName, followIndexName); + assertBusy(() -> verifyDocuments(followIndexName, numDocs)); + // unfollow and then follow and then index a few docs in leader index: + unfollowIndex(followIndexName); + followIndex("leader_cluster:" + leaderIndexName, followIndexName); + try (RestClient leaderClient = buildLeaderClient()) { + int id = numDocs; + index(leaderClient, leaderIndexName, Integer.toString(id), "field", id, "filtered_field", "true"); + index(leaderClient, leaderIndexName, Integer.toString(id + 1), "field", id + 1, "filtered_field", "true"); + index(leaderClient, leaderIndexName, Integer.toString(id + 2), "field", id + 2, "filtered_field", "true"); + } + assertBusy(() -> verifyDocuments(followIndexName, numDocs + 3)); + } + } + + private static void index(RestClient client, String index, String id, Object... 
fields) throws IOException { + XContentBuilder document = jsonBuilder().startObject(); + for (int i = 0; i < fields.length; i += 2) { + document.field((String) fields[i], fields[i + 1]); + } + document.endObject(); + final Request request = new Request("POST", "/" + index + "/_doc/" + id); + request.setJsonEntity(Strings.toString(document)); + assertOK(client.performRequest(request)); + } + + private static void refresh(String index) throws IOException { + assertOK(client().performRequest(new Request("POST", "/" + index + "/_refresh"))); + } + + private static void followIndex(String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/follow"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + private static void createAndFollowIndex(String leaderIndex, String followIndex) throws IOException { + final Request request = new Request("POST", "/" + followIndex + "/_ccr/create_and_follow"); + request.setJsonEntity("{\"leader_index\": \"" + leaderIndex + "\", \"idle_shard_retry_delay\": \"10ms\"}"); + assertOK(client().performRequest(request)); + } + + private static void unfollowIndex(String followIndex) throws IOException { + assertOK(client().performRequest(new Request("POST", "/" + followIndex + "/_ccr/unfollow"))); + } + + private static void verifyDocuments(String index, int expectedNumDocs) throws IOException { + final Request request = new Request("GET", "/" + index + "/_search"); + request.addParameter("size", Integer.toString(expectedNumDocs)); + request.addParameter("sort", "field:asc"); + request.addParameter("q", "filtered_field:true"); + Map<String, Object> response = toMap(client().performRequest(request)); + + int numDocs = (int) XContentMapValues.extractValue("hits.total", response); + assertThat(numDocs, equalTo(expectedNumDocs)); + + List<?> hits = (List<?>) XContentMapValues.extractValue("hits.hits", response); + assertThat(hits.size(), equalTo(expectedNumDocs)); + for (int i = 0; i < expectedNumDocs; i++) { + int value = (int) XContentMapValues.extractValue("_source.field", (Map<?, ?>) hits.get(i)); + assertThat(i, equalTo(value)); + } + } + + private static Map<String, Object> toMap(Response response) throws IOException { + return toMap(EntityUtils.toString(response.getEntity())); + } + + private static Map<String, Object> toMap(String response) { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); + } + + private RestClient buildLeaderClient() throws IOException { + assert runningAgainstLeaderCluster == false; + String leaderUrl = System.getProperty("tests.leader_host"); + int portSeparator = leaderUrl.lastIndexOf(':'); + HttpHost httpHost = new HttpHost(leaderUrl.substring(0, portSeparator), + Integer.parseInt(leaderUrl.substring(portSeparator + 1)), getProtocol()); + return buildClient(Settings.EMPTY, new HttpHost[]{httpHost}); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java new file mode 100644 index 00000000000..b00883f5c2a --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.PersistentTaskPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.ccr.action.ShardChangesAction; +import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; +import org.elasticsearch.xpack.ccr.action.ShardFollowTask; +import org.elasticsearch.xpack.ccr.action.ShardFollowTasksExecutor; +import org.elasticsearch.xpack.ccr.action.TransportCcrStatsAction; +import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; +import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction; +import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; +import org.elasticsearch.xpack.ccr.rest.RestCcrStatsAction; +import org.elasticsearch.xpack.ccr.rest.RestCreateAndFollowIndexAction; +import org.elasticsearch.xpack.ccr.rest.RestFollowIndexAction; +import org.elasticsearch.xpack.ccr.rest.RestUnfollowIndexAction; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_ENABLED_SETTING; +import static org.elasticsearch.xpack.ccr.CcrSettings.CCR_FOLLOWING_INDEX_SETTING; + +/** + * Container class for CCR functionality. 
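+ * <p>
+ * The plugin wires CCR in through the standard {@code Plugin} extension points below: transport and REST actions for
+ * the follow, create-and-follow, unfollow, and stats APIs; a persistent tasks executor that drives shard-level
+ * following; named writeable and X-Content entries for the shard-follow task and its status; and an engine factory
+ * that installs a following engine on follower indices.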
+ */ +public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, EnginePlugin { + + public static final String CCR_THREAD_POOL_NAME = "ccr"; + + private final boolean enabled; + private final Settings settings; + private final CcrLicenseChecker ccrLicenseChecker; + + /** + * Construct an instance of the CCR container with the specified settings. + * + * @param settings the settings + */ + @SuppressWarnings("unused") // constructed reflectively by the plugin infrastructure + public Ccr(final Settings settings) { + this(settings, new CcrLicenseChecker()); + } + + /** + * Construct an instance of the CCR container with the specified settings and license checker. + * + * @param settings the settings + * @param ccrLicenseChecker the CCR license checker + */ + Ccr(final Settings settings, final CcrLicenseChecker ccrLicenseChecker) { + this.settings = settings; + this.enabled = CCR_ENABLED_SETTING.get(settings); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + public Collection<Object> createComponents( + final Client client, + final ClusterService clusterService, + final ThreadPool threadPool, + final ResourceWatcherService resourceWatcherService, + final ScriptService scriptService, + final NamedXContentRegistry xContentRegistry, + final Environment environment, + final NodeEnvironment nodeEnvironment, + final NamedWriteableRegistry namedWriteableRegistry) { + return Collections.singleton(ccrLicenseChecker); + } + + @Override + public List<PersistentTasksExecutor<?>> getPersistentTasksExecutor(ClusterService clusterService, + ThreadPool threadPool, Client client) { + return Collections.singletonList(new ShardFollowTasksExecutor(settings, client, threadPool)); + } + + public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() { + if (enabled == false) { + return emptyList(); + } + + return Arrays.asList( + new ActionHandler<>(BulkShardOperationsAction.INSTANCE, TransportBulkShardOperationsAction.class), + new ActionHandler<>(CcrStatsAction.INSTANCE, TransportCcrStatsAction.class), + new ActionHandler<>(CreateAndFollowIndexAction.INSTANCE, CreateAndFollowIndexAction.TransportAction.class), + new ActionHandler<>(FollowIndexAction.INSTANCE, FollowIndexAction.TransportAction.class), + new ActionHandler<>(ShardChangesAction.INSTANCE, ShardChangesAction.TransportAction.class), + new ActionHandler<>(UnfollowIndexAction.INSTANCE, UnfollowIndexAction.TransportAction.class)); + } + + public List<RestHandler> getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier<DiscoveryNodes> nodesInCluster) { + return Arrays.asList( + new RestCcrStatsAction(settings, restController), + new RestCreateAndFollowIndexAction(settings, restController), + new RestFollowIndexAction(settings, restController), + new RestUnfollowIndexAction(settings, restController)); + } + + public List<NamedWriteableRegistry.Entry> getNamedWriteables() { + return Arrays.asList( + // Persistent action requests + new NamedWriteableRegistry.Entry(PersistentTaskParams.class, ShardFollowTask.NAME, + ShardFollowTask::new), + + // Task statuses + new NamedWriteableRegistry.Entry(Task.Status.class, ShardFollowNodeTask.Status.STATUS_PARSER_NAME, + ShardFollowNodeTask.Status::new) + ); + } + + public List<NamedXContentRegistry.Entry> getNamedXContent() { + return Arrays.asList( + // Persistent action requests + new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(ShardFollowTask.NAME), + ShardFollowTask::fromXContent), + + // Task statuses +
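+ // (parsing counterpart of the writeable registration above, so a shard-follow task status can be read back from X-Content)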
new NamedXContentRegistry.Entry( + ShardFollowNodeTask.Status.class, + new ParseField(ShardFollowNodeTask.Status.STATUS_PARSER_NAME), + ShardFollowNodeTask.Status::fromXContent)); + } + + /** + * The settings defined by CCR. + * + * @return the settings + */ + public List<Setting<?>> getSettings() { + return CcrSettings.getSettings(); + } + + /** + * The optional engine factory for CCR. This method inspects the index settings for the {@link CcrSettings#CCR_FOLLOWING_INDEX_SETTING} + * setting to determine whether or not the engine implementation should be a following engine. + * + * @return the optional engine factory + */ + public Optional<EngineFactory> getEngineFactory(final IndexSettings indexSettings) { + if (CCR_FOLLOWING_INDEX_SETTING.get(indexSettings.getSettings())) { + return Optional.of(new FollowingEngineFactory()); + } else { + return Optional.empty(); + } + } + + public List<ExecutorBuilder<?>> getExecutorBuilders(Settings settings) { + if (enabled == false) { + return Collections.emptyList(); + } + + FixedExecutorBuilder ccrTp = new FixedExecutorBuilder(settings, CCR_THREAD_POOL_NAME, + 32, 100, "xpack.ccr.ccr_thread_pool"); + + return Collections.singletonList(ccrTp); + } + + protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java new file mode 100644 index 00000000000..cefa490f4f7 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrLicenseChecker.java @@ -0,0 +1,141 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.license.RemoteClusterLicenseChecker; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.util.Collections; +import java.util.Locale; +import java.util.Objects; +import java.util.function.BooleanSupplier; +import java.util.function.Consumer; + +/** + * Encapsulates licensing checking for CCR. + */ +public final class CcrLicenseChecker { + + private final BooleanSupplier isCcrAllowed; + + /** + * Constructs a CCR license checker with the default rule based on the license state for checking if CCR is allowed. + */ + CcrLicenseChecker() { + this(XPackPlugin.getSharedLicenseState()::isCcrAllowed); + } + + /** + * Constructs a CCR license checker with the specified boolean supplier. + * + * @param isCcrAllowed a boolean supplier that should return true if CCR is allowed and false otherwise + */ + CcrLicenseChecker(final BooleanSupplier isCcrAllowed) { + this.isCcrAllowed = Objects.requireNonNull(isCcrAllowed); + } + + /** + * Returns whether or not CCR is allowed.
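+ * The default rule delegates to the shared {@link XPackLicenseState}; tests can supply their own
+ * {@link BooleanSupplier} through the package-private constructor.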
+ * + * @return true if CCR is allowed, otherwise false + */ + public boolean isCcrAllowed() { + return isCcrAllowed.getAsBoolean(); + } + + /** + * Fetches the leader index metadata from the remote cluster. Before fetching the index metadata, the remote cluster is checked for + * license compatibility with CCR. If the remote cluster is not licensed for CCR, the {@link ActionListener#onFailure(Exception)} method + * of the specified listener is invoked. Otherwise, the specified consumer is invoked with the leader index metadata fetched from the + * remote cluster. + * + * @param client the client + * @param clusterAlias the remote cluster alias + * @param leaderIndex the name of the leader index + * @param listener the listener + * @param leaderIndexMetadataConsumer the leader index metadata consumer + * @param <T> the type of response the listener is waiting for + */ + public <T> void checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + final Client client, + final String clusterAlias, + final String leaderIndex, + final ActionListener<T> listener, + final Consumer<IndexMetaData> leaderIndexMetadataConsumer) { + // we have to check the license on the remote cluster + new RemoteClusterLicenseChecker(client, XPackLicenseState::isCcrAllowedForOperationMode).checkRemoteClusterLicenses( + Collections.singletonList(clusterAlias), + new ActionListener<RemoteClusterLicenseChecker.LicenseCheck>() { + + @Override + public void onResponse(final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + if (licenseCheck.isSuccess()) { + final Client remoteClient = client.getRemoteClusterClient(clusterAlias); + final ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.clear(); + clusterStateRequest.metaData(true); + clusterStateRequest.indices(leaderIndex); + final ActionListener<ClusterStateResponse> clusterStateListener = ActionListener.wrap( + r -> { + final ClusterState remoteClusterState = r.getState(); + final IndexMetaData leaderIndexMetadata = + remoteClusterState.getMetaData().index(leaderIndex); + leaderIndexMetadataConsumer.accept(leaderIndexMetadata); + }, + listener::onFailure); + // following an index in remote cluster, so use remote client to fetch leader index metadata + remoteClient.admin().cluster().state(clusterStateRequest, clusterStateListener); + } else { + listener.onFailure(incompatibleRemoteLicense(leaderIndex, licenseCheck)); + } + } + + @Override + public void onFailure(final Exception e) { + listener.onFailure(unknownRemoteLicense(leaderIndex, clusterAlias, e)); + } + + }); + } + + private static ElasticsearchStatusException incompatibleRemoteLicense( + final String leaderIndex, final RemoteClusterLicenseChecker.LicenseCheck licenseCheck) { + final String clusterAlias = licenseCheck.remoteClusterLicenseInfo().clusterAlias(); + final String message = String.format( + Locale.ROOT, + "can not fetch remote index [%s:%s] metadata as the remote cluster [%s] is not licensed for [ccr]; %s", + clusterAlias, + leaderIndex, + clusterAlias, + RemoteClusterLicenseChecker.buildErrorMessage( + "ccr", + licenseCheck.remoteClusterLicenseInfo(), + RemoteClusterLicenseChecker::isLicensePlatinumOrTrial)); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST); + } + + private static ElasticsearchStatusException unknownRemoteLicense( + final String leaderIndex, final String clusterAlias, final Exception cause) { + final String message = String.format( + Locale.ROOT, + "can not fetch remote index [%s:%s] metadata as the license state of the remote cluster [%s] could not be determined", + clusterAlias, + leaderIndex,
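+ // the cluster alias is passed twice: once for the [%s:%s] index coordinate and once for the trailing [%s] cluster slot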
clusterAlias); + return new ElasticsearchStatusException(message, RestStatus.BAD_REQUEST, cause); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java new file mode 100644 index 00000000000..6960766bad0 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CcrSettings.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; + +import java.util.Arrays; +import java.util.List; + +/** + * Container class for CCR settings. + */ +public final class CcrSettings { + + // prevent construction + private CcrSettings() { + + } + + /** + * Setting for controlling whether or not CCR is enabled. + */ + static final Setting CCR_ENABLED_SETTING = Setting.boolSetting("xpack.ccr.enabled", true, Property.NodeScope); + + /** + * Index setting for a following index. + */ + public static final Setting CCR_FOLLOWING_INDEX_SETTING = + Setting.boolSetting("index.xpack.ccr.following_index", false, Setting.Property.IndexScope); + + /** + * The settings defined by CCR. + * + * @return the settings + */ + static List> getSettings() { + return Arrays.asList( + CCR_ENABLED_SETTING, + CCR_FOLLOWING_INDEX_SETTING); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java new file mode 100644 index 00000000000..b5d6697fc73 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CcrStatsAction.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.tasks.BaseTasksRequest; +import org.elasticsearch.action.support.tasks.BaseTasksResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.tasks.Task; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +public class CcrStatsAction extends Action { + + public static final String NAME = "cluster:monitor/ccr/stats"; + + public static final CcrStatsAction INSTANCE = new CcrStatsAction(); + + private CcrStatsAction() { + super(NAME); + } + + @Override + public TasksResponse newResponse() { + return new TasksResponse(); + } + + public static class TasksResponse extends BaseTasksResponse implements ToXContentObject { + + private final List taskResponses; + + public TasksResponse() { + this(Collections.emptyList(), Collections.emptyList(), Collections.emptyList()); + } + + TasksResponse( + final List taskFailures, + final List nodeFailures, + final List taskResponses) { + super(taskFailures, nodeFailures); + this.taskResponses = taskResponses; + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + // sort by index name, then shard ID + final Map> taskResponsesByIndex = new TreeMap<>(); + for (final TaskResponse taskResponse : taskResponses) { + taskResponsesByIndex.computeIfAbsent( + taskResponse.followerShardId().getIndexName(), + k -> new TreeMap<>()).put(taskResponse.followerShardId().getId(), taskResponse); + } + builder.startObject(); + { + for (final Map.Entry> index : taskResponsesByIndex.entrySet()) { + builder.startArray(index.getKey()); + { + for (final Map.Entry shard : index.getValue().entrySet()) { + shard.getValue().status().toXContent(builder, params); + } + } + builder.endArray(); + } + } + builder.endObject(); + return builder; + } + } + + public static class TasksRequest extends BaseTasksRequest implements IndicesRequest { + + private String[] indices; + + @Override + public String[] indices() { + return indices; + } + + public void setIndices(final String[] indices) { + this.indices = indices; + } + + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public void setIndicesOptions(final IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + } + + @Override + public boolean match(final Task task) { + /* + * This is a limitation of the current tasks API. When the transport action is executed, the tasks API invokes this match method + * to find the tasks on which to execute the task-level operation (see TransportTasksAction#nodeOperation and + * TransportTasksAction#processTasks). If we do the matching here, then we can not match index patterns. 
Therefore, we override + * TransportTasksAction#processTasks (see TransportCcrStatsAction#processTasks) and do the matching there. We should never see + * this method invoked and since we can not support matching a task on the basis of the request here, we throw that this + * operation is unsupported. + */ + throw new UnsupportedOperationException(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + } + + } + + public static class TaskResponse implements Writeable { + + private final ShardId followerShardId; + + ShardId followerShardId() { + return followerShardId; + } + + private final ShardFollowNodeTask.Status status; + + ShardFollowNodeTask.Status status() { + return status; + } + + TaskResponse(final ShardId followerShardId, final ShardFollowNodeTask.Status status) { + this.followerShardId = followerShardId; + this.status = status; + } + + TaskResponse(final StreamInput in) throws IOException { + this.followerShardId = ShardId.readShardId(in); + this.status = new ShardFollowNodeTask.Status(in); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + followerShardId.writeTo(out); + status.writeTo(out); + } + + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java new file mode 100644 index 00000000000..2e36bca2932 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexAction.java @@ -0,0 +1,355 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.ResourceAlreadyExistsException; +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.ActiveShardsObserver; +import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.AckedClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class CreateAndFollowIndexAction extends Action { + + public static final CreateAndFollowIndexAction INSTANCE = new CreateAndFollowIndexAction(); + public static final String NAME = "cluster:admin/xpack/ccr/create_and_follow_index"; + + private CreateAndFollowIndexAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends AcknowledgedRequest { + + private FollowIndexAction.Request followRequest; + + public Request(FollowIndexAction.Request followRequest) { + this.followRequest = Objects.requireNonNull(followRequest); + } + + Request() { + } + + public FollowIndexAction.Request getFollowRequest() { + return followRequest; + } + + @Override + public ActionRequestValidationException validate() { + return followRequest.validate(); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + followRequest = new FollowIndexAction.Request(); + followRequest.readFrom(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + followRequest.writeTo(out); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request 
request = (Request) o; + return Objects.equals(followRequest, request.followRequest); + } + + @Override + public int hashCode() { + return Objects.hash(followRequest); + } + } + + public static class Response extends ActionResponse implements ToXContentObject { + + private boolean followIndexCreated; + private boolean followIndexShardsAcked; + private boolean indexFollowingStarted; + + Response() { + } + + Response(boolean followIndexCreated, boolean followIndexShardsAcked, boolean indexFollowingStarted) { + this.followIndexCreated = followIndexCreated; + this.followIndexShardsAcked = followIndexShardsAcked; + this.indexFollowingStarted = indexFollowingStarted; + } + + public boolean isFollowIndexCreated() { + return followIndexCreated; + } + + public boolean isFollowIndexShardsAcked() { + return followIndexShardsAcked; + } + + public boolean isIndexFollowingStarted() { + return indexFollowingStarted; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + followIndexCreated = in.readBoolean(); + followIndexShardsAcked = in.readBoolean(); + indexFollowingStarted = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBoolean(followIndexCreated); + out.writeBoolean(followIndexShardsAcked); + out.writeBoolean(indexFollowingStarted); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("follow_index_created", followIndexCreated); + builder.field("follow_index_shards_acked", followIndexShardsAcked); + builder.field("index_following_started", indexFollowingStarted); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Response response = (Response) o; + return followIndexCreated == response.followIndexCreated && + followIndexShardsAcked == response.followIndexShardsAcked && + indexFollowingStarted == response.indexFollowingStarted; + } + + @Override + public int hashCode() { + return Objects.hash(followIndexCreated, followIndexShardsAcked, indexFollowingStarted); + } + } + + public static class TransportAction extends TransportMasterNodeAction { + + private final Client client; + private final AllocationService allocationService; + private final RemoteClusterService remoteClusterService; + private final ActiveShardsObserver activeShardsObserver; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportAction( + final Settings settings, + final ThreadPool threadPool, + final TransportService transportService, + final ClusterService clusterService, + final ActionFilters actionFilters, + final IndexNameExpressionResolver indexNameExpressionResolver, + final Client client, + final AllocationService allocationService, + final CcrLicenseChecker ccrLicenseChecker) { + super(settings, NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, Request::new); + this.client = client; + this.allocationService = allocationService; + this.remoteClusterService = transportService.getRemoteClusterService(); + this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected String executor() { + return ThreadPool.Names.SAME; + } + + @Override + protected Response 
newResponse() { + return new Response(); + } + + @Override + protected void masterOperation( + final Request request, final ClusterState state, final ActionListener listener) throws Exception { + if (ccrLicenseChecker.isCcrAllowed()) { + final String[] indices = new String[]{request.getFollowRequest().getLeaderIndex()}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + createFollowerIndexAndFollowLocalIndex(request, state, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + createFollowerIndexAndFollowRemoteIndex(request, clusterAlias, leaderIndex, listener); + } + } else { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + } + } + + private void createFollowerIndexAndFollowLocalIndex( + final Request request, final ClusterState state, final ActionListener listener) { + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getFollowRequest().getLeaderIndex()); + createFollowerIndex(leaderIndexMetadata, request, listener); + } + + private void createFollowerIndexAndFollowRemoteIndex( + final Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + client, + clusterAlias, + leaderIndex, + listener, + leaderIndexMetaData -> createFollowerIndex(leaderIndexMetaData, request, listener)); + } + + private void createFollowerIndex( + final IndexMetaData leaderIndexMetaData, final Request request, final ActionListener listener) { + if (leaderIndexMetaData == null) { + listener.onFailure(new IllegalArgumentException("leader index [" + request.getFollowRequest().getLeaderIndex() + + "] does not exist")); + return; + } + + ActionListener handler = ActionListener.wrap( + result -> { + if (result) { + initiateFollowing(request, listener); + } else { + listener.onResponse(new Response(true, false, false)); + } + }, + listener::onFailure); + // Can't use create index api here, because then index templates can alter the mappings / settings. + // And index templates could introduce settings / mappings that are incompatible with the leader index. + clusterService.submitStateUpdateTask("follow_index_action", new AckedClusterStateUpdateTask(request, handler) { + + @Override + protected Boolean newResponse(boolean acknowledged) { + return acknowledged; + } + + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + String followIndex = request.getFollowRequest().getFollowerIndex(); + IndexMetaData currentIndex = currentState.metaData().index(followIndex); + if (currentIndex != null) { + throw new ResourceAlreadyExistsException(currentIndex.getIndex()); + } + + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + IndexMetaData.Builder imdBuilder = IndexMetaData.builder(followIndex); + + // Copy all settings, but overwrite a few settings. 
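+                    // The copy starts from the leader's settings; the overrides below regenerate the index UUID,
+                    // point the provided index name at the follower, and enable the index.xpack.ccr.following_index
+                    // marker, which is what later causes Ccr#getEngineFactory to install the following engine.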
+ Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(leaderIndexMetaData.getSettings()); + // Overwriting UUID here, because otherwise we can't follow indices in the same cluster + settingsBuilder.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); + settingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, followIndex); + settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + imdBuilder.settings(settingsBuilder); + + // Copy mappings from leader IMD to follow IMD + for (ObjectObjectCursor cursor : leaderIndexMetaData.getMappings()) { + imdBuilder.putMapping(cursor.value); + } + imdBuilder.setRoutingNumShards(leaderIndexMetaData.getRoutingNumShards()); + IndexMetaData followIMD = imdBuilder.build(); + mdBuilder.put(followIMD, false); + + ClusterState.Builder builder = ClusterState.builder(currentState); + builder.metaData(mdBuilder.build()); + ClusterState updatedState = builder.build(); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()) + .addAsNew(updatedState.metaData().index(request.getFollowRequest().getFollowerIndex())); + updatedState = allocationService.reroute( + ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), + "follow index [" + request.getFollowRequest().getFollowerIndex() + "] created"); + + logger.info("[{}] creating index, cause [ccr_create_and_follow], shards [{}]/[{}]", + followIndex, followIMD.getNumberOfShards(), followIMD.getNumberOfReplicas()); + + return updatedState; + } + }); + } + + private void initiateFollowing(Request request, ActionListener listener) { + activeShardsObserver.waitForActiveShards(new String[]{request.followRequest.getFollowerIndex()}, + ActiveShardCount.DEFAULT, request.timeout(), result -> { + if (result) { + client.execute(FollowIndexAction.INSTANCE, request.getFollowRequest(), ActionListener.wrap( + r -> listener.onResponse(new Response(true, true, r.isAcknowledged())), + listener::onFailure + )); + } else { + listener.onResponse(new Response(true, false, false)); + } + }, listener::onFailure); + } + + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getFollowRequest().getFollowerIndex()); + } + + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java new file mode 100644 index 00000000000..17b7bbe674b --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/FollowIndexAction.java @@ -0,0 +1,571 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexingSlowLog; +import org.elasticsearch.index.SearchSlowLog; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesRequestCache; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksService; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.ccr.CcrLicenseChecker; +import org.elasticsearch.xpack.ccr.CcrSettings; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.stream.Collectors; + +public class FollowIndexAction extends Action { + + public static final FollowIndexAction INSTANCE = new FollowIndexAction(); + public static final String NAME = "cluster:admin/xpack/ccr/follow_index"; + + private FollowIndexAction() { + super(NAME); + } + + @Override + public AcknowledgedResponse newResponse() { + return new AcknowledgedResponse(); + } + + public static class Request extends ActionRequest implements ToXContentObject { + + private static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); + private static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, true, + (args, followerIndex) -> { + 
if (args[1] != null) { + followerIndex = (String) args[1]; + } + return new Request((String) args[0], followerIndex, (Integer) args[2], (Integer) args[3], (Long) args[4], + (Integer) args[5], (Integer) args[6], (TimeValue) args[7], (TimeValue) args[8]); + }); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FOLLOWER_INDEX_FIELD); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_CONCURRENT_READ_BATCHES); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), ShardFollowTask.MAX_WRITE_BUFFER_SIZE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), ShardFollowTask.RETRY_TIMEOUT.getPreferredName()), + ShardFollowTask.RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName()), + ShardFollowTask.IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + } + + public static Request fromXContent(XContentParser parser, String followerIndex) throws IOException { + Request request = PARSER.parse(parser, followerIndex); + if (followerIndex != null) { + if (request.followerIndex == null) { + request.followerIndex = followerIndex; + } else { + if (request.followerIndex.equals(followerIndex) == false) { + throw new IllegalArgumentException("provided follower_index is not equal"); + } + } + } + return request; + } + + private String leaderIndex; + private String followerIndex; + private int maxBatchOperationCount; + private int maxConcurrentReadBatches; + private long maxOperationSizeInBytes; + private int maxConcurrentWriteBatches; + private int maxWriteBufferSize; + private TimeValue retryTimeout; + private TimeValue idleShardRetryDelay; + + public Request( + String leaderIndex, + String followerIndex, + Integer maxBatchOperationCount, + Integer maxConcurrentReadBatches, + Long maxOperationSizeInBytes, + Integer maxConcurrentWriteBatches, + Integer maxWriteBufferSize, + TimeValue retryTimeout, + TimeValue idleShardRetryDelay) { + + if (leaderIndex == null) { + throw new IllegalArgumentException("leader_index is missing"); + } + if (followerIndex == null) { + throw new IllegalArgumentException("follower_index is missing"); + } + if (maxBatchOperationCount == null) { + maxBatchOperationCount = ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT; + } + if (maxConcurrentReadBatches == null) { + maxConcurrentReadBatches = ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES; + } + if (maxOperationSizeInBytes == null) { + maxOperationSizeInBytes = ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; + } + if (maxConcurrentWriteBatches == null) { + maxConcurrentWriteBatches = ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES; + } + if (maxWriteBufferSize == null) { + maxWriteBufferSize = ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE; + } + if (retryTimeout == null) { + retryTimeout = ShardFollowNodeTask.DEFAULT_RETRY_TIMEOUT; + } + if 
(idleShardRetryDelay == null) { + idleShardRetryDelay = ShardFollowNodeTask.DEFAULT_IDLE_SHARD_RETRY_DELAY; + } + + if (maxBatchOperationCount < 1) { + throw new IllegalArgumentException("maxBatchOperationCount must be larger than 0"); + } + if (maxConcurrentReadBatches < 1) { + throw new IllegalArgumentException("concurrent_processors must be larger than 0"); + } + if (maxOperationSizeInBytes <= 0) { + throw new IllegalArgumentException("processor_max_translog_bytes must be larger than 0"); + } + if (maxConcurrentWriteBatches < 1) { + throw new IllegalArgumentException("maxConcurrentWriteBatches must be larger than 0"); + } + if (maxWriteBufferSize < 1) { + throw new IllegalArgumentException("maxWriteBufferSize must be larger than 0"); + } + + this.leaderIndex = leaderIndex; + this.followerIndex = followerIndex; + this.maxBatchOperationCount = maxBatchOperationCount; + this.maxConcurrentReadBatches = maxConcurrentReadBatches; + this.maxOperationSizeInBytes = maxOperationSizeInBytes; + this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + this.maxWriteBufferSize = maxWriteBufferSize; + this.retryTimeout = retryTimeout; + this.idleShardRetryDelay = idleShardRetryDelay; + } + + Request() { + } + + public String getLeaderIndex() { + return leaderIndex; + } + + public String getFollowerIndex() { + return followerIndex; + } + + public int getMaxBatchOperationCount() { + return maxBatchOperationCount; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + leaderIndex = in.readString(); + followerIndex = in.readString(); + maxBatchOperationCount = in.readVInt(); + maxConcurrentReadBatches = in.readVInt(); + maxOperationSizeInBytes = in.readVLong(); + maxConcurrentWriteBatches = in.readVInt(); + maxWriteBufferSize = in.readVInt(); + retryTimeout = in.readOptionalTimeValue(); + idleShardRetryDelay = in.readOptionalTimeValue(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(leaderIndex); + out.writeString(followerIndex); + out.writeVInt(maxBatchOperationCount); + out.writeVInt(maxConcurrentReadBatches); + out.writeVLong(maxOperationSizeInBytes); + out.writeVInt(maxConcurrentWriteBatches); + out.writeVInt(maxWriteBufferSize); + out.writeOptionalTimeValue(retryTimeout); + out.writeOptionalTimeValue(idleShardRetryDelay); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); + builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); + builder.field(ShardFollowTask.MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + builder.field(ShardFollowTask.MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxOperationSizeInBytes); + builder.field(ShardFollowTask.MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(ShardFollowTask.MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + builder.field(ShardFollowTask.MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(), maxConcurrentWriteBatches); + builder.field(ShardFollowTask.RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + builder.field(ShardFollowTask.IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + } + builder.endObject(); + return builder; + } + + @Override + public boolean 
equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Request request = (Request) o; + return maxBatchOperationCount == request.maxBatchOperationCount && + maxConcurrentReadBatches == request.maxConcurrentReadBatches && + maxOperationSizeInBytes == request.maxOperationSizeInBytes && + maxConcurrentWriteBatches == request.maxConcurrentWriteBatches && + maxWriteBufferSize == request.maxWriteBufferSize && + Objects.equals(retryTimeout, request.retryTimeout) && + Objects.equals(idleShardRetryDelay, request.idleShardRetryDelay) && + Objects.equals(leaderIndex, request.leaderIndex) && + Objects.equals(followerIndex, request.followerIndex); + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndex, + followerIndex, + maxBatchOperationCount, + maxConcurrentReadBatches, + maxOperationSizeInBytes, + maxConcurrentWriteBatches, + maxWriteBufferSize, + retryTimeout, + idleShardRetryDelay + ); + } + } + + public static class TransportAction extends HandledTransportAction { + + private final Client client; + private final ThreadPool threadPool; + private final ClusterService clusterService; + private final RemoteClusterService remoteClusterService; + private final PersistentTasksService persistentTasksService; + private final IndicesService indicesService; + private final CcrLicenseChecker ccrLicenseChecker; + + @Inject + public TransportAction( + final Settings settings, + final ThreadPool threadPool, + final TransportService transportService, + final ActionFilters actionFilters, + final Client client, + final ClusterService clusterService, + final PersistentTasksService persistentTasksService, + final IndicesService indicesService, + final CcrLicenseChecker ccrLicenseChecker) { + super(settings, NAME, transportService, actionFilters, Request::new); + this.client = client; + this.threadPool = threadPool; + this.clusterService = clusterService; + this.remoteClusterService = transportService.getRemoteClusterService(); + this.persistentTasksService = persistentTasksService; + this.indicesService = indicesService; + this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker); + } + + @Override + protected void doExecute(final Task task, + final Request request, + final ActionListener listener) { + if (ccrLicenseChecker.isCcrAllowed()) { + final String[] indices = new String[]{request.leaderIndex}; + final Map> remoteClusterIndices = remoteClusterService.groupClusterIndices(indices, s -> false); + if (remoteClusterIndices.containsKey(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY)) { + followLocalIndex(request, listener); + } else { + assert remoteClusterIndices.size() == 1; + final Map.Entry> entry = remoteClusterIndices.entrySet().iterator().next(); + assert entry.getValue().size() == 1; + final String clusterAlias = entry.getKey(); + final String leaderIndex = entry.getValue().get(0); + followRemoteIndex(request, clusterAlias, leaderIndex, listener); + } + } else { + listener.onFailure(LicenseUtils.newComplianceException("ccr")); + } + } + + private void followLocalIndex(final Request request, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + // following an index in local cluster, so use local cluster state to fetch leader index metadata + final IndexMetaData leaderIndexMetadata = state.getMetaData().index(request.getLeaderIndex()); + try { + start(request, null, leaderIndexMetadata, 
followerIndexMetadata, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + } + + private void followRemoteIndex( + final Request request, + final String clusterAlias, + final String leaderIndex, + final ActionListener listener) { + final ClusterState state = clusterService.state(); + final IndexMetaData followerIndexMetadata = state.getMetaData().index(request.getFollowerIndex()); + ccrLicenseChecker.checkRemoteClusterLicenseAndFetchLeaderIndexMetadata( + client, + clusterAlias, + leaderIndex, + listener, + leaderIndexMetadata -> { + try { + start(request, clusterAlias, leaderIndexMetadata, followerIndexMetadata, listener); + } catch (final IOException e) { + listener.onFailure(e); + } + }); + } + + /** + * Performs validation on the provided leader and follow {@link IndexMetaData} instances and then + * creates a persistent task for each leader primary shard. This persistent tasks track changes in the leader + * shard and replicate these changes to a follower shard. + * + * Currently the following validation is performed: + *
+     * <ul>
+     *     <li>The leader index and follow index need to have the same number of primary shards</li>
+     * </ul>
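+     * <p>
+     * In addition, {@code validate} requires that soft deletes are enabled on the leader index, that both indices
+     * are open, that leader and follower have the same {@code number_of_routing_shards}, that the follower has
+     * {@code index.xpack.ccr.following_index} enabled, that all non-whitelisted index settings are identical, and
+     * that the leader mapping merges cleanly into the follower mapping.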
+ */ + void start( + Request request, + String clusterNameAlias, + IndexMetaData leaderIndexMetadata, + IndexMetaData followIndexMetadata, + ActionListener handler) throws IOException { + + MapperService mapperService = followIndexMetadata != null ? indicesService.createIndexMapperService(followIndexMetadata) : null; + validate(request, leaderIndexMetadata, followIndexMetadata, mapperService); + final int numShards = followIndexMetadata.getNumberOfShards(); + final AtomicInteger counter = new AtomicInteger(numShards); + final AtomicReferenceArray responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards()); + Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() + .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));for (int i = 0; i < numShards; i++) { + final int shardId = i; + String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId; + + ShardFollowTask shardFollowTask = new ShardFollowTask(clusterNameAlias, + new ShardId(followIndexMetadata.getIndex(), shardId), + new ShardId(leaderIndexMetadata.getIndex(), shardId), + request.maxBatchOperationCount, request.maxConcurrentReadBatches, request.maxOperationSizeInBytes, + request.maxConcurrentWriteBatches, request.maxWriteBufferSize, request.retryTimeout, + request.idleShardRetryDelay, filteredHeaders); + persistentTasksService.sendStartRequest(taskId, ShardFollowTask.NAME, shardFollowTask, + new ActionListener>() { + @Override + public void onResponse(PersistentTasksCustomMetaData.PersistentTask task) { + responses.set(shardId, task); + finalizeResponse(); + } + + @Override + public void onFailure(Exception e) { + responses.set(shardId, e); + finalizeResponse(); + } + + void finalizeResponse() { + Exception error = null; + if (counter.decrementAndGet() == 0) { + for (int j = 0; j < responses.length(); j++) { + Object response = responses.get(j); + if (response instanceof Exception) { + if (error == null) { + error = (Exception) response; + } else { + error.addSuppressed((Throwable) response); + } + } + } + + if (error == null) { + // include task ids? 
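+                            // every shard-level persistent task started successfully, so the request as a whole
+                            // can be acknowledged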
+ handler.onResponse(new AcknowledgedResponse(true)); + } else { + // TODO: cancel all started tasks + handler.onFailure(error); + } + } + } + } + ); + } + } + } + + private static final Set> WHITELISTED_SETTINGS; + + static { + Set> whiteListedSettings = new HashSet<>(); + whiteListedSettings.add(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING); + + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING); + whiteListedSettings.add(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING); + whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING); + whiteListedSettings.add(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING); + whiteListedSettings.add(ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING); + + whiteListedSettings.add(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING); + whiteListedSettings.add(IndexSettings.MAX_RESULT_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_WARMER_ENABLED_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING); + whiteListedSettings.add(IndexSettings.MAX_RESCORE_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING); + whiteListedSettings.add(IndexSettings.DEFAULT_FIELD_SETTING); + whiteListedSettings.add(IndexSettings.QUERY_STRING_LENIENT_SETTING); + whiteListedSettings.add(IndexSettings.QUERY_STRING_ANALYZE_WILDCARD); + whiteListedSettings.add(IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD); + whiteListedSettings.add(IndexSettings.ALLOW_UNMAPPED); + whiteListedSettings.add(IndexSettings.INDEX_SEARCH_IDLE_AFTER); + whiteListedSettings.add(BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING); + + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING); + whiteListedSettings.add(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING); + whiteListedSettings.add(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_MAX_SOURCE_CHARS_TO_LOG_SETTING); + + whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_SETTING); + whiteListedSettings.add(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING); + + WHITELISTED_SETTINGS = 
Collections.unmodifiableSet(whiteListedSettings); + } + + static void validate(Request request, + IndexMetaData leaderIndex, + IndexMetaData followIndex, MapperService followerMapperService) { + if (leaderIndex == null) { + throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not exist"); + } + if (followIndex == null) { + throw new IllegalArgumentException("follow index [" + request.followerIndex + "] does not exist"); + } + if (leaderIndex.getSettings().getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false) == false) { + throw new IllegalArgumentException("leader index [" + request.leaderIndex + "] does not have soft deletes enabled"); + } + if (leaderIndex.getNumberOfShards() != followIndex.getNumberOfShards()) { + throw new IllegalArgumentException("leader index primary shards [" + leaderIndex.getNumberOfShards() + + "] does not match with the number of shards of the follow index [" + followIndex.getNumberOfShards() + "]"); + } + if (leaderIndex.getRoutingNumShards() != followIndex.getRoutingNumShards()) { + throw new IllegalArgumentException("leader index number_of_routing_shards [" + leaderIndex.getRoutingNumShards() + + "] does not match with the number_of_routing_shards of the follow index [" + followIndex.getRoutingNumShards() + "]"); + } + if (leaderIndex.getState() != IndexMetaData.State.OPEN || followIndex.getState() != IndexMetaData.State.OPEN) { + throw new IllegalArgumentException("leader and follow index must be open"); + } + if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(followIndex.getSettings()) == false) { + throw new IllegalArgumentException("the following index [" + request.followerIndex + "] is not ready " + + "to follow; the setting [" + CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey() + "] must be enabled."); + } + // Make a copy, remove settings that are allowed to be different and then compare if the settings are equal. + Settings leaderSettings = filter(leaderIndex.getSettings()); + Settings followerSettings = filter(followIndex.getSettings()); + if (leaderSettings.equals(followerSettings) == false) { + throw new IllegalArgumentException("the leader and follower index settings must be identical"); + } + + // Validates if the current follower mapping is mergable with the leader mapping. 
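+        // (The MapperService passed in is a dedicated instance created from the follower metadata via
+        // IndicesService#createIndexMapperService, so the merge below acts as a compatibility check and does not
+        // mutate a live index.)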
+ // This also validates for example whether specific mapper plugins have been installed + followerMapperService.merge(leaderIndex, MapperService.MergeReason.MAPPING_RECOVERY); + } + + private static Settings filter(Settings originalSettings) { + Settings.Builder settings = Settings.builder().put(originalSettings); + // Remove settings that are always going to be different between leader and follow index: + settings.remove(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey()); + settings.remove(IndexMetaData.SETTING_INDEX_UUID); + settings.remove(IndexMetaData.SETTING_INDEX_PROVIDED_NAME); + settings.remove(IndexMetaData.SETTING_CREATION_DATE); + + Iterator iterator = settings.keys().iterator(); + while (iterator.hasNext()) { + String key = iterator.next(); + for (Setting whitelistedSetting : WHITELISTED_SETTINGS) { + if (whitelistedSetting.match(key)) { + iterator.remove(); + break; + } + } + } + return settings.build(); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java new file mode 100644 index 00000000000..b505ee015ba --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -0,0 +1,320 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.single.shard.SingleShardRequest; +import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardNotStartedException; +import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.action.ValidateActions.addValidationError; + +public class ShardChangesAction extends Action { + + public static final ShardChangesAction INSTANCE = new ShardChangesAction(); + public static final String NAME = "indices:data/read/xpack/ccr/shard_changes"; + + private ShardChangesAction() { + super(NAME); + } + + @Override + public Response newResponse() { + return new Response(); + } + + public static class Request extends 
SingleShardRequest { + + private long fromSeqNo; + private int maxOperationCount; + private ShardId shardId; + private long maxOperationSizeInBytes = ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES; + + public Request(ShardId shardId) { + super(shardId.getIndexName()); + this.shardId = shardId; + } + + Request() { + } + + public ShardId getShard() { + return shardId; + } + + public long getFromSeqNo() { + return fromSeqNo; + } + + public void setFromSeqNo(long fromSeqNo) { + this.fromSeqNo = fromSeqNo; + } + + public int getMaxOperationCount() { + return maxOperationCount; + } + + public void setMaxOperationCount(int maxOperationCount) { + this.maxOperationCount = maxOperationCount; + } + + public long getMaxOperationSizeInBytes() { + return maxOperationSizeInBytes; + } + + public void setMaxOperationSizeInBytes(long maxOperationSizeInBytes) { + this.maxOperationSizeInBytes = maxOperationSizeInBytes; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (fromSeqNo < 0) { + validationException = addValidationError("fromSeqNo [" + fromSeqNo + "] cannot be lower than 0", validationException); + } + if (maxOperationCount < 0) { + validationException = addValidationError("maxOperationCount [" + maxOperationCount + + "] cannot be lower than 0", validationException); + } + if (maxOperationSizeInBytes <= 0) { + validationException = addValidationError("maxOperationSizeInBytes [" + maxOperationSizeInBytes + "] must be larger than 0", + validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + fromSeqNo = in.readVLong(); + maxOperationCount = in.readVInt(); + shardId = ShardId.readShardId(in); + maxOperationSizeInBytes = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(fromSeqNo); + out.writeVInt(maxOperationCount); + shardId.writeTo(out); + out.writeVLong(maxOperationSizeInBytes); + } + + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Request request = (Request) o; + return fromSeqNo == request.fromSeqNo && + maxOperationCount == request.maxOperationCount && + Objects.equals(shardId, request.shardId) && + maxOperationSizeInBytes == request.maxOperationSizeInBytes; + } + + @Override + public int hashCode() { + return Objects.hash(fromSeqNo, maxOperationCount, shardId, maxOperationSizeInBytes); + } + + @Override + public String toString() { + return "Request{" + + "fromSeqNo=" + fromSeqNo + + ", maxOperationCount=" + maxOperationCount + + ", shardId=" + shardId + + ", maxOperationSizeInBytes=" + maxOperationSizeInBytes + + '}'; + } + + } + + public static final class Response extends ActionResponse { + + private long mappingVersion; + + public long getMappingVersion() { + return mappingVersion; + } + + private long globalCheckpoint; + + public long getGlobalCheckpoint() { + return globalCheckpoint; + } + + private long maxSeqNo; + + public long getMaxSeqNo() { + return maxSeqNo; + } + + private Translog.Operation[] operations; + + public Translog.Operation[] getOperations() { + return operations; + } + + Response() { + } + + Response(final long mappingVersion, final long globalCheckpoint, final long maxSeqNo, final Translog.Operation[] operations) { + this.mappingVersion = mappingVersion; + this.globalCheckpoint = 
globalCheckpoint; + this.maxSeqNo = maxSeqNo; + this.operations = operations; + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + mappingVersion = in.readVLong(); + globalCheckpoint = in.readZLong(); + maxSeqNo = in.readZLong(); + operations = in.readArray(Translog.Operation::readOperation, Translog.Operation[]::new); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVLong(mappingVersion); + out.writeZLong(globalCheckpoint); + out.writeZLong(maxSeqNo); + out.writeArray(Translog.Operation::writeOperation, operations); + } + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Response that = (Response) o; + return mappingVersion == that.mappingVersion && + globalCheckpoint == that.globalCheckpoint && + maxSeqNo == that.maxSeqNo && + Arrays.equals(operations, that.operations); + } + + @Override + public int hashCode() { + return Objects.hash(mappingVersion, globalCheckpoint, maxSeqNo, Arrays.hashCode(operations)); + } + } + + public static class TransportAction extends TransportSingleShardAction { + + private final IndicesService indicesService; + + @Inject + public TransportAction(Settings settings, + ThreadPool threadPool, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + IndicesService indicesService) { + super(settings, NAME, threadPool, clusterService, transportService, actionFilters, + indexNameExpressionResolver, Request::new, ThreadPool.Names.GET); + this.indicesService = indicesService; + } + + @Override + protected Response shardOperation(Request request, ShardId shardId) throws IOException { + IndexService indexService = indicesService.indexServiceSafe(request.getShard().getIndex()); + IndexShard indexShard = indexService.getShard(request.getShard().id()); + final SeqNoStats seqNoStats = indexShard.seqNoStats(); + final long mappingVersion = clusterService.state().metaData().index(shardId.getIndex()).getMappingVersion(); + + final Translog.Operation[] operations = getOperations( + indexShard, + seqNoStats.getGlobalCheckpoint(), + request.fromSeqNo, + request.maxOperationCount, + request.maxOperationSizeInBytes); + return new Response(mappingVersion, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), operations); + } + + @Override + protected boolean resolveIndex(Request request) { + return false; + } + + @Override + protected ShardsIterator shards(ClusterState state, InternalRequest request) { + return state + .routingTable() + .shardRoutingTable(request.concreteIndex(), request.request().getShard().id()) + .activeInitializingShardsRandomIt(); + } + + @Override + protected Response newResponse() { + return new Response(); + } + + } + + private static final Translog.Operation[] EMPTY_OPERATIONS_ARRAY = new Translog.Operation[0]; + + /** + * Returns at most maxOperationCount operations from the specified from sequence number. + * This method will never return operations above the specified globalCheckpoint. + * + * Also if the sum of collected operations' size is above the specified maxOperationSizeInBytes then this method + * stops collecting more operations and returns what has been collected so far. 
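+     *
+     * For example, with {@code fromSeqNo = 0}, {@code maxOperationCount = 10} and a global checkpoint of 4, at
+     * most the operations with sequence numbers 0 through 4 are returned. The byte limit is a soft limit: the
+     * operation that first pushes the accumulated size over {@code maxOperationSizeInBytes} is still included
+     * before collection stops.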
+ */ + static Translog.Operation[] getOperations(IndexShard indexShard, long globalCheckpoint, long fromSeqNo, int maxOperationCount, + long maxOperationSizeInBytes) throws IOException { + if (indexShard.state() != IndexShardState.STARTED) { + throw new IndexShardNotStartedException(indexShard.shardId(), indexShard.state()); + } + if (fromSeqNo > indexShard.getGlobalCheckpoint()) { + return EMPTY_OPERATIONS_ARRAY; + } + int seenBytes = 0; + // - 1 is needed, because toSeqNo is inclusive + long toSeqNo = Math.min(globalCheckpoint, (fromSeqNo + maxOperationCount) - 1); + final List operations = new ArrayList<>(); + try (Translog.Snapshot snapshot = indexShard.newChangesSnapshot("ccr", fromSeqNo, toSeqNo, true)) { + Translog.Operation op; + while ((op = snapshot.next()) != null) { + operations.add(op); + seenBytes += op.estimateSize(); + if (seenBytes > maxOperationSizeInBytes) { + break; + } + } + } + return operations.toArray(EMPTY_OPERATIONS_ARRAY); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java new file mode 100644 index 00000000000..00e3aaaae2a --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTask.java @@ -0,0 +1,929 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr.action; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.support.TransportActions; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.transport.NetworkExceptionHelper; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; + +import java.io.IOException; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Objects; +import java.util.PriorityQueue; +import java.util.Queue; +import java.util.TreeMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; +import java.util.stream.Collectors; + +/** + * The node task that fetch the write operations from a 
leader shard and + * persists these ops in the follower shard. + */ +public abstract class ShardFollowNodeTask extends AllocatedPersistentTask { + + public static final int DEFAULT_MAX_BATCH_OPERATION_COUNT = 1024; + public static final int DEFAULT_MAX_CONCURRENT_READ_BATCHES = 1; + public static final int DEFAULT_MAX_CONCURRENT_WRITE_BATCHES = 1; + public static final int DEFAULT_MAX_WRITE_BUFFER_SIZE = 10240; + public static final long DEFAULT_MAX_BATCH_SIZE_IN_BYTES = Long.MAX_VALUE; + private static final int RETRY_LIMIT = 10; + public static final TimeValue DEFAULT_RETRY_TIMEOUT = new TimeValue(500); + public static final TimeValue DEFAULT_IDLE_SHARD_RETRY_DELAY = TimeValue.timeValueSeconds(10); + + private static final Logger LOGGER = Loggers.getLogger(ShardFollowNodeTask.class); + + private final String leaderIndex; + private final ShardFollowTask params; + private final TimeValue retryTimeout; + private final TimeValue idleShardChangesRequestDelay; + private final BiConsumer scheduler; + private final LongSupplier relativeTimeProvider; + + private long leaderGlobalCheckpoint; + private long leaderMaxSeqNo; + private long lastRequestedSeqNo; + private long followerGlobalCheckpoint = 0; + private long followerMaxSeqNo = 0; + private int numConcurrentReads = 0; + private int numConcurrentWrites = 0; + private long currentMappingVersion = 0; + private long totalFetchTimeMillis = 0; + private long numberOfSuccessfulFetches = 0; + private long numberOfFailedFetches = 0; + private long operationsReceived = 0; + private long totalTransferredBytes = 0; + private long totalIndexTimeMillis = 0; + private long numberOfSuccessfulBulkOperations = 0; + private long numberOfFailedBulkOperations = 0; + private long numberOfOperationsIndexed = 0; + private long lastFetchTime = -1; + private final Queue buffer = new PriorityQueue<>(Comparator.comparing(Translog.Operation::seqNo)); + private final LinkedHashMap fetchExceptions; + + ShardFollowNodeTask(long id, String type, String action, String description, TaskId parentTask, Map headers, + ShardFollowTask params, BiConsumer scheduler, final LongSupplier relativeTimeProvider) { + super(id, type, action, description, parentTask, headers); + this.params = params; + this.scheduler = scheduler; + this.relativeTimeProvider = relativeTimeProvider; + this.retryTimeout = params.getRetryTimeout(); + this.idleShardChangesRequestDelay = params.getIdleShardRetryDelay(); + /* + * We keep track of the most recent fetch exceptions, with the number of exceptions that we track equal to the maximum number of + * concurrent fetches. For each failed fetch, we track the from sequence number associated with the request, and we clear the entry + * when the fetch task associated with that from sequence number succeeds. + */ + this.fetchExceptions = new LinkedHashMap() { + @Override + protected boolean removeEldestEntry(final Map.Entry eldest) { + return size() > params.getMaxConcurrentReadBatches(); + } + }; + + if (params.getLeaderClusterAlias() != null) { + leaderIndex = params.getLeaderClusterAlias() + ":" + params.getLeaderShardId().getIndexName(); + } else { + leaderIndex = params.getLeaderShardId().getIndexName(); + } + } + + void start( + final long leaderGlobalCheckpoint, + final long leaderMaxSeqNo, + final long followerGlobalCheckpoint, + final long followerMaxSeqNo) { + /* + * While this should only ever be called once and before any other threads can touch these fields, we use synchronization here to + * avoid the need to declare these fields as volatile. 
That is, we are ensuring these fields are always accessed under the same lock. + */ + synchronized (this) { + this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; + this.leaderMaxSeqNo = leaderMaxSeqNo; + this.followerGlobalCheckpoint = followerGlobalCheckpoint; + this.followerMaxSeqNo = followerMaxSeqNo; + this.lastRequestedSeqNo = followerGlobalCheckpoint; + } + + // update the follower mapping; this gets us the leader mapping version and makes sure that the leader and follower mappings are identical + updateMapping(mappingVersion -> { + synchronized (ShardFollowNodeTask.this) { + currentMappingVersion = mappingVersion; + } + LOGGER.info("{} Started to follow leader shard {}, followGlobalCheckPoint={}, mappingVersion={}", + params.getFollowShardId(), params.getLeaderShardId(), followerGlobalCheckpoint, mappingVersion); + coordinateReads(); + }); + } + + synchronized void coordinateReads() { + if (isStopped()) { + LOGGER.info("{} shard follow task has been stopped", params.getFollowShardId()); + return; + } + + LOGGER.trace("{} coordinate reads, lastRequestedSeqNo={}, leaderGlobalCheckpoint={}", + params.getFollowShardId(), lastRequestedSeqNo, leaderGlobalCheckpoint); + final int maxBatchOperationCount = params.getMaxBatchOperationCount(); + while (hasReadBudget() && lastRequestedSeqNo < leaderGlobalCheckpoint) { + final long from = lastRequestedSeqNo + 1; + final long maxRequiredSeqNo = Math.min(leaderGlobalCheckpoint, from + maxBatchOperationCount - 1); + final int requestBatchCount; + if (numConcurrentReads == 0) { + // This is the only request, we can optimistically fetch more documents if possible but not enforce max_required_seqno. + requestBatchCount = maxBatchOperationCount; + } else { + requestBatchCount = Math.toIntExact(maxRequiredSeqNo - from + 1); + } + assert 0 < requestBatchCount && requestBatchCount <= maxBatchOperationCount : "request_batch_count=" + requestBatchCount; + LOGGER.trace("{}[{} ongoing reads] read from_seqno={} max_required_seqno={} batch_count={}", + params.getFollowShardId(), numConcurrentReads, from, maxRequiredSeqNo, requestBatchCount); + numConcurrentReads++; + sendShardChangesRequest(from, requestBatchCount, maxRequiredSeqNo); + lastRequestedSeqNo = maxRequiredSeqNo; + } + + if (numConcurrentReads == 0 && hasReadBudget()) { + assert lastRequestedSeqNo == leaderGlobalCheckpoint; + // We peek to check whether there is anything new in the leader; if there is, we will happily accept it.
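+ // Editor's note: a worked example of the read windowing above, with illustrative values that are not from + // this PR: with lastRequestedSeqNo=99, leaderGlobalCheckpoint=300, maxBatchOperationCount=64 and + // maxConcurrentReadBatches=3, the while loop issues three concurrent reads: from=100 with + // maxRequiredSeqNo=min(300, 100+64-1)=163 (batch_count=64, the optimistic first read), from=164 with + // maxRequiredSeqNo=227 and from=228 with maxRequiredSeqNo=291, then stops because the read budget is + // exhausted. The peek below only fires once the task has caught up and no reads are in flight; it passes + // lastRequestedSeqNo as maxRequiredSeqNo so that an empty response marks the shard as idle instead of + // triggering a follow-up read.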
+ numConcurrentReads++; + long from = lastRequestedSeqNo + 1; + LOGGER.trace("{}[{}] peek read [{}]", params.getFollowShardId(), numConcurrentReads, from); + sendShardChangesRequest(from, maxBatchOperationCount, lastRequestedSeqNo); + } + } + + private boolean hasReadBudget() { + assert Thread.holdsLock(this); + if (numConcurrentReads >= params.getMaxConcurrentReadBatches()) { + LOGGER.trace("{} no new reads, maximum number of concurrent reads has been reached [{}]", + params.getFollowShardId(), numConcurrentReads); + return false; + } + if (buffer.size() > params.getMaxWriteBufferSize()) { + LOGGER.trace("{} no new reads, buffer limit has been reached [{}]", params.getFollowShardId(), buffer.size()); + return false; + } + return true; + } + + private synchronized void coordinateWrites() { + if (isStopped()) { + LOGGER.info("{} shard follow task has been stopped", params.getFollowShardId()); + return; + } + + while (hasWriteBudget() && buffer.isEmpty() == false) { + long sumEstimatedSize = 0L; + int length = Math.min(params.getMaxBatchOperationCount(), buffer.size()); + List ops = new ArrayList<>(length); + for (int i = 0; i < length; i++) { + Translog.Operation op = buffer.remove(); + ops.add(op); + sumEstimatedSize += op.estimateSize(); + if (sumEstimatedSize > params.getMaxBatchSizeInBytes()) { + break; + } + } + numConcurrentWrites++; + LOGGER.trace("{}[{}] write [{}/{}] [{}]", params.getFollowShardId(), numConcurrentWrites, ops.get(0).seqNo(), + ops.get(ops.size() - 1).seqNo(), ops.size()); + sendBulkShardOperationsRequest(ops); + } + } + + private boolean hasWriteBudget() { + assert Thread.holdsLock(this); + if (numConcurrentWrites >= params.getMaxConcurrentWriteBatches()) { + LOGGER.trace("{} maximum number of concurrent writes has been reached [{}]", + params.getFollowShardId(), numConcurrentWrites); + return false; + } + return true; + } + + private void sendShardChangesRequest(long from, int maxOperationCount, long maxRequiredSeqNo) { + sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, new AtomicInteger(0)); + } + + private void sendShardChangesRequest(long from, int maxOperationCount, long maxRequiredSeqNo, AtomicInteger retryCounter) { + final long startTime = relativeTimeProvider.getAsLong(); + synchronized (this) { + lastFetchTime = startTime; + } + innerSendShardChangesRequest(from, maxOperationCount, + response -> { + synchronized (ShardFollowNodeTask.this) { + totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + numberOfSuccessfulFetches++; + fetchExceptions.remove(from); + operationsReceived += response.getOperations().length; + totalTransferredBytes += Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::estimateSize).sum(); + } + handleReadResponse(from, maxRequiredSeqNo, response); + }, + e -> { + synchronized (ShardFollowNodeTask.this) { + totalFetchTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + numberOfFailedFetches++; + fetchExceptions.put(from, new ElasticsearchException(e)); + } + handleFailure(e, retryCounter, () -> sendShardChangesRequest(from, maxOperationCount, maxRequiredSeqNo, retryCounter)); + }); + } + + void handleReadResponse(long from, long maxRequiredSeqNo, ShardChangesAction.Response response) { + maybeUpdateMapping(response.getMappingVersion(), () -> innerHandleReadResponse(from, maxRequiredSeqNo, response)); + } +
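+ // Editor's note: this no-op hook lets subclasses (tests, stats collectors) observe each fetched batch + // without taking the task's lock. A minimal sketch, using a hypothetical concrete subclass name: + // + // class CountingShardFollowNodeTask extends MyConcreteShardFollowNodeTask { + // final java.util.concurrent.atomic.LongAdder fetched = new java.util.concurrent.atomic.LongAdder(); + // @Override + // protected void onOperationsFetched(Translog.Operation[] operations) { + // fetched.add(operations.length); + // } + // } + /** Called when some operations are fetched from the leader */ +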
protected void onOperationsFetched(Translog.Operation[] operations) { + + } + + synchronized void innerHandleReadResponse(long from, long maxRequiredSeqNo, ShardChangesAction.Response response) { + onOperationsFetched(response.getOperations()); + leaderGlobalCheckpoint = Math.max(leaderGlobalCheckpoint, response.getGlobalCheckpoint()); + leaderMaxSeqNo = Math.max(leaderMaxSeqNo, response.getMaxSeqNo()); + final long newFromSeqNo; + if (response.getOperations().length == 0) { + newFromSeqNo = from; + } else { + assert response.getOperations()[0].seqNo() == from : + "first operation is not what we asked for. From is [" + from + "], got " + response.getOperations()[0]; + buffer.addAll(Arrays.asList(response.getOperations())); + final long maxSeqNo = response.getOperations()[response.getOperations().length - 1].seqNo(); + assert maxSeqNo == + Arrays.stream(response.getOperations()).mapToLong(Translog.Operation::seqNo).max().getAsLong(); + newFromSeqNo = maxSeqNo + 1; + // update the last requested seq no, as we may have gotten more than we asked for and don't want to ask for it again. + lastRequestedSeqNo = Math.max(lastRequestedSeqNo, maxSeqNo); + assert lastRequestedSeqNo <= leaderGlobalCheckpoint : "lastRequestedSeqNo [" + lastRequestedSeqNo + + "] is larger than the global checkpoint [" + leaderGlobalCheckpoint + "]"; + coordinateWrites(); + } + if (newFromSeqNo <= maxRequiredSeqNo && isStopped() == false) { + int newSize = Math.toIntExact(maxRequiredSeqNo - newFromSeqNo + 1); + LOGGER.trace("{} received [{}] ops, still missing [{}/{}], continuing to read...", + params.getFollowShardId(), response.getOperations().length, newFromSeqNo, maxRequiredSeqNo); + sendShardChangesRequest(newFromSeqNo, newSize, maxRequiredSeqNo); + } else { + // read is completed, decrement + numConcurrentReads--; + if (response.getOperations().length == 0 && leaderGlobalCheckpoint == lastRequestedSeqNo) { + // we got nothing and have no reason to believe asking again will get us more, treat shard as idle and delay + // future requests + LOGGER.trace("{} received no ops and no known ops to fetch, scheduling to coordinate reads", + params.getFollowShardId()); + scheduler.accept(idleShardChangesRequestDelay, this::coordinateReads); + } else { + coordinateReads(); + } + } + } + + private void sendBulkShardOperationsRequest(List operations) { + sendBulkShardOperationsRequest(operations, new AtomicInteger(0)); + } + + private void sendBulkShardOperationsRequest(List operations, AtomicInteger retryCounter) { + final long startTime = relativeTimeProvider.getAsLong(); + innerSendBulkShardOperationsRequest(operations, + response -> { + synchronized (ShardFollowNodeTask.this) { + totalIndexTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + numberOfSuccessfulBulkOperations++; + numberOfOperationsIndexed += operations.size(); + } + handleWriteResponse(response); + }, + e -> { + synchronized (ShardFollowNodeTask.this) { + totalIndexTimeMillis += TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTime); + numberOfFailedBulkOperations++; + } + handleFailure(e, retryCounter, () -> sendBulkShardOperationsRequest(operations, retryCounter)); + } + ); + } + + private synchronized void handleWriteResponse(final BulkShardOperationsResponse response) { + this.followerGlobalCheckpoint = Math.max(this.followerGlobalCheckpoint, response.getGlobalCheckpoint()); + this.followerMaxSeqNo = Math.max(this.followerMaxSeqNo, response.getMaxSeqNo()); + numConcurrentWrites--; + assert
numConcurrentWrites >= 0; + coordinateWrites(); + + // If the buffer holds more ops than allowed, reads may all have been stopped; + // this invocation makes sure that a read is started when there is budget and no reads are in flight. + coordinateReads(); + } + + private synchronized void maybeUpdateMapping(Long minimumRequiredMappingVersion, Runnable task) { + if (currentMappingVersion >= minimumRequiredMappingVersion) { + LOGGER.trace("{} mapping version [{}] is higher than or equal to the minimum required mapping version [{}]", + params.getFollowShardId(), currentMappingVersion, minimumRequiredMappingVersion); + task.run(); + } else { + LOGGER.trace("{} updating mapping, mapping version [{}] is lower than the minimum required mapping version [{}]", + params.getFollowShardId(), currentMappingVersion, minimumRequiredMappingVersion); + updateMapping(mappingVersion -> { + currentMappingVersion = mappingVersion; + task.run(); + }); + } + } + + private void updateMapping(LongConsumer handler) { + updateMapping(handler, new AtomicInteger(0)); + } + + private void updateMapping(LongConsumer handler, AtomicInteger retryCounter) { + innerUpdateMapping(handler, e -> handleFailure(e, retryCounter, () -> updateMapping(handler, retryCounter))); + } + + private void handleFailure(Exception e, AtomicInteger retryCounter, Runnable task) { + assert e != null; + if (shouldRetry(e)) { + if (isStopped() == false && retryCounter.incrementAndGet() <= RETRY_LIMIT) { + LOGGER.debug(new ParameterizedMessage("{} error during follow shard task, retrying...", params.getFollowShardId()), e); + scheduler.accept(retryTimeout, task); + } else { + markAsFailed(new ElasticsearchException("retrying failed [" + retryCounter.get() + + "] times, aborting...", e)); + } + } else { + markAsFailed(e); + } + } + + private boolean shouldRetry(Exception e) { + return NetworkExceptionHelper.isConnectException(e) || + NetworkExceptionHelper.isCloseConnectionException(e) || + TransportActions.isShardNotAvailableException(e); + } + + // These methods are protected for testing purposes: + protected abstract void innerUpdateMapping(LongConsumer handler, Consumer errorHandler); + + protected abstract void innerSendBulkShardOperationsRequest( + List operations, Consumer handler, Consumer errorHandler); + + protected abstract void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer handler, + Consumer errorHandler); + + @Override + protected void onCancelled() { + markAsCompleted(); + } + + protected boolean isStopped() { + return isCancelled() || isCompleted(); + } + + public ShardId getFollowShardId() { + return params.getFollowShardId(); + } + + @Override + public synchronized Status getStatus() { + final long timeSinceLastFetchMillis; + if (lastFetchTime != -1) { + timeSinceLastFetchMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - lastFetchTime); + } else { + // To avoid confusion when ccr hasn't executed a fetch yet: + timeSinceLastFetchMillis = -1; + } + return new Status( + leaderIndex, + getFollowShardId().getId(), + leaderGlobalCheckpoint, + leaderMaxSeqNo, + followerGlobalCheckpoint, + followerMaxSeqNo, + lastRequestedSeqNo, + numConcurrentReads, + numConcurrentWrites, + buffer.size(), + currentMappingVersion, + totalFetchTimeMillis, + numberOfSuccessfulFetches, + numberOfFailedFetches, + operationsReceived, + totalTransferredBytes, + totalIndexTimeMillis, + numberOfSuccessfulBulkOperations, + numberOfFailedBulkOperations, + numberOfOperationsIndexed, + new
TreeMap<>(fetchExceptions), + timeSinceLastFetchMillis); + } + + public static class Status implements Task.Status { + + public static final String STATUS_PARSER_NAME = "shard-follow-node-task-status"; + + static final ParseField LEADER_INDEX = new ParseField("leader_index"); + static final ParseField SHARD_ID = new ParseField("shard_id"); + static final ParseField LEADER_GLOBAL_CHECKPOINT_FIELD = new ParseField("leader_global_checkpoint"); + static final ParseField LEADER_MAX_SEQ_NO_FIELD = new ParseField("leader_max_seq_no"); + static final ParseField FOLLOWER_GLOBAL_CHECKPOINT_FIELD = new ParseField("follower_global_checkpoint"); + static final ParseField FOLLOWER_MAX_SEQ_NO_FIELD = new ParseField("follower_max_seq_no"); + static final ParseField LAST_REQUESTED_SEQ_NO_FIELD = new ParseField("last_requested_seq_no"); + static final ParseField NUMBER_OF_CONCURRENT_READS_FIELD = new ParseField("number_of_concurrent_reads"); + static final ParseField NUMBER_OF_CONCURRENT_WRITES_FIELD = new ParseField("number_of_concurrent_writes"); + static final ParseField NUMBER_OF_QUEUED_WRITES_FIELD = new ParseField("number_of_queued_writes"); + static final ParseField MAPPING_VERSION_FIELD = new ParseField("mapping_version"); + static final ParseField TOTAL_FETCH_TIME_MILLIS_FIELD = new ParseField("total_fetch_time_millis"); + static final ParseField NUMBER_OF_SUCCESSFUL_FETCHES_FIELD = new ParseField("number_of_successful_fetches"); + static final ParseField NUMBER_OF_FAILED_FETCHES_FIELD = new ParseField("number_of_failed_fetches"); + static final ParseField OPERATIONS_RECEIVED_FIELD = new ParseField("operations_received"); + static final ParseField TOTAL_TRANSFERRED_BYTES = new ParseField("total_transferred_bytes"); + static final ParseField TOTAL_INDEX_TIME_MILLIS_FIELD = new ParseField("total_index_time_millis"); + static final ParseField NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD = new ParseField("number_of_successful_bulk_operations"); + static final ParseField NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD = new ParseField("number_of_failed_bulk_operations"); + static final ParseField NUMBER_OF_OPERATIONS_INDEXED_FIELD = new ParseField("number_of_operations_indexed"); + static final ParseField FETCH_EXCEPTIONS = new ParseField("fetch_exceptions"); + static final ParseField TIME_SINCE_LAST_FETCH_MILLIS_FIELD = new ParseField("time_since_last_fetch_millis"); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser<Status, Void> STATUS_PARSER = new ConstructingObjectParser<>(STATUS_PARSER_NAME, + args -> new Status( + (String) args[0], + (int) args[1], + (long) args[2], + (long) args[3], + (long) args[4], + (long) args[5], + (long) args[6], + (int) args[7], + (int) args[8], + (int) args[9], + (long) args[10], + (long) args[11], + (long) args[12], + (long) args[13], + (long) args[14], + (long) args[15], + (long) args[16], + (long) args[17], + (long) args[18], + (long) args[19], + new TreeMap<>( + ((List<Map.Entry<Long, ElasticsearchException>>) args[20]) + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))), + (long) args[21])); + + public static final String FETCH_EXCEPTIONS_ENTRY_PARSER_NAME = "shard-follow-node-task-status-fetch-exceptions-entry"; + + static final ConstructingObjectParser<Map.Entry<Long, ElasticsearchException>, Void> FETCH_EXCEPTIONS_ENTRY_PARSER = + new ConstructingObjectParser<>( + FETCH_EXCEPTIONS_ENTRY_PARSER_NAME, + args -> new AbstractMap.SimpleEntry<>((long) args[0], (ElasticsearchException) args[1])); + + static { + STATUS_PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_INDEX); +
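// Editor's note: ConstructingObjectParser binds required constructor args positionally, so the order of the + // declare* calls in this block must line up with the args[0..21] indices consumed by the Status constructor + // lambda above; LEADER_INDEX is args[0], SHARD_ID is args[1], and so on through args[21]. +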
STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SHARD_ID); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_GLOBAL_CHECKPOINT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LEADER_MAX_SEQ_NO_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_GLOBAL_CHECKPOINT_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FOLLOWER_MAX_SEQ_NO_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), LAST_REQUESTED_SEQ_NO_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_READS_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_CONCURRENT_WRITES_FIELD); + STATUS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_QUEUED_WRITES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAPPING_VERSION_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_FETCH_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_FETCHES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_FETCHES_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), OPERATIONS_RECEIVED_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_TRANSFERRED_BYTES); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TOTAL_INDEX_TIME_MILLIS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), NUMBER_OF_OPERATIONS_INDEXED_FIELD); + STATUS_PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_PARSER, FETCH_EXCEPTIONS); + STATUS_PARSER.declareLong(ConstructingObjectParser.constructorArg(), TIME_SINCE_LAST_FETCH_MILLIS_FIELD); + } + + static final ParseField FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO = new ParseField("from_seq_no"); + static final ParseField FETCH_EXCEPTIONS_ENTRY_EXCEPTION = new ParseField("exception"); + + static { + FETCH_EXCEPTIONS_ENTRY_PARSER.declareLong(ConstructingObjectParser.constructorArg(), FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO); + FETCH_EXCEPTIONS_ENTRY_PARSER.declareObject( + ConstructingObjectParser.constructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), + FETCH_EXCEPTIONS_ENTRY_EXCEPTION); + } + + private final String leaderIndex; + + public String leaderIndex() { + return leaderIndex; + } + + private final int shardId; + + public int getShardId() { + return shardId; + } + + private final long leaderGlobalCheckpoint; + + public long leaderGlobalCheckpoint() { + return leaderGlobalCheckpoint; + } + + private final long leaderMaxSeqNo; + + public long leaderMaxSeqNo() { + return leaderMaxSeqNo; + } + + private final long followerGlobalCheckpoint; + + public long followerGlobalCheckpoint() { + return followerGlobalCheckpoint; + } + + private final long followerMaxSeqNo; + + public long followerMaxSeqNo() { + return followerMaxSeqNo; + } + + private final long lastRequestedSeqNo; + + public long lastRequestedSeqNo() { + return lastRequestedSeqNo; + } + + private final int numberOfConcurrentReads; + + public int numberOfConcurrentReads() { + 
return numberOfConcurrentReads; + } + + private final int numberOfConcurrentWrites; + + public int numberOfConcurrentWrites() { + return numberOfConcurrentWrites; + } + + private final int numberOfQueuedWrites; + + public int numberOfQueuedWrites() { + return numberOfQueuedWrites; + } + + private final long mappingVersion; + + public long mappingVersion() { + return mappingVersion; + } + + private final long totalFetchTimeMillis; + + public long totalFetchTimeMillis() { + return totalFetchTimeMillis; + } + + private final long numberOfSuccessfulFetches; + + public long numberOfSuccessfulFetches() { + return numberOfSuccessfulFetches; + } + + private final long numberOfFailedFetches; + + public long numberOfFailedFetches() { + return numberOfFailedFetches; + } + + private final long operationsReceived; + + public long operationsReceived() { + return operationsReceived; + } + + private final long totalTransferredBytes; + + public long totalTransferredBytes() { + return totalTransferredBytes; + } + + private final long totalIndexTimeMillis; + + public long totalIndexTimeMillis() { + return totalIndexTimeMillis; + } + + private final long numberOfSuccessfulBulkOperations; + + public long numberOfSuccessfulBulkOperations() { + return numberOfSuccessfulBulkOperations; + } + + private final long numberOfFailedBulkOperations; + + public long numberOfFailedBulkOperations() { + return numberOfFailedBulkOperations; + } + + private final long numberOfOperationsIndexed; + + public long numberOfOperationsIndexed() { + return numberOfOperationsIndexed; + } + + private final NavigableMap fetchExceptions; + + public NavigableMap fetchExceptions() { + return fetchExceptions; + } + + private final long timeSinceLastFetchMillis; + + public long timeSinceLastFetchMillis() { + return timeSinceLastFetchMillis; + } + + Status( + final String leaderIndex, + final int shardId, + final long leaderGlobalCheckpoint, + final long leaderMaxSeqNo, + final long followerGlobalCheckpoint, + final long followerMaxSeqNo, + final long lastRequestedSeqNo, + final int numberOfConcurrentReads, + final int numberOfConcurrentWrites, + final int numberOfQueuedWrites, + final long mappingVersion, + final long totalFetchTimeMillis, + final long numberOfSuccessfulFetches, + final long numberOfFailedFetches, + final long operationsReceived, + final long totalTransferredBytes, + final long totalIndexTimeMillis, + final long numberOfSuccessfulBulkOperations, + final long numberOfFailedBulkOperations, + final long numberOfOperationsIndexed, + final NavigableMap fetchExceptions, + final long timeSinceLastFetchMillis) { + this.leaderIndex = leaderIndex; + this.shardId = shardId; + this.leaderGlobalCheckpoint = leaderGlobalCheckpoint; + this.leaderMaxSeqNo = leaderMaxSeqNo; + this.followerGlobalCheckpoint = followerGlobalCheckpoint; + this.followerMaxSeqNo = followerMaxSeqNo; + this.lastRequestedSeqNo = lastRequestedSeqNo; + this.numberOfConcurrentReads = numberOfConcurrentReads; + this.numberOfConcurrentWrites = numberOfConcurrentWrites; + this.numberOfQueuedWrites = numberOfQueuedWrites; + this.mappingVersion = mappingVersion; + this.totalFetchTimeMillis = totalFetchTimeMillis; + this.numberOfSuccessfulFetches = numberOfSuccessfulFetches; + this.numberOfFailedFetches = numberOfFailedFetches; + this.operationsReceived = operationsReceived; + this.totalTransferredBytes = totalTransferredBytes; + this.totalIndexTimeMillis = totalIndexTimeMillis; + this.numberOfSuccessfulBulkOperations = numberOfSuccessfulBulkOperations; + 
this.numberOfFailedBulkOperations = numberOfFailedBulkOperations; + this.numberOfOperationsIndexed = numberOfOperationsIndexed; + this.fetchExceptions = Objects.requireNonNull(fetchExceptions); + this.timeSinceLastFetchMillis = timeSinceLastFetchMillis; + } + + public Status(final StreamInput in) throws IOException { + this.leaderIndex = in.readString(); + this.shardId = in.readVInt(); + this.leaderGlobalCheckpoint = in.readZLong(); + this.leaderMaxSeqNo = in.readZLong(); + this.followerGlobalCheckpoint = in.readZLong(); + this.followerMaxSeqNo = in.readZLong(); + this.lastRequestedSeqNo = in.readZLong(); + this.numberOfConcurrentReads = in.readVInt(); + this.numberOfConcurrentWrites = in.readVInt(); + this.numberOfQueuedWrites = in.readVInt(); + this.mappingVersion = in.readVLong(); + this.totalFetchTimeMillis = in.readVLong(); + this.numberOfSuccessfulFetches = in.readVLong(); + this.numberOfFailedFetches = in.readVLong(); + this.operationsReceived = in.readVLong(); + this.totalTransferredBytes = in.readVLong(); + this.totalIndexTimeMillis = in.readVLong(); + this.numberOfSuccessfulBulkOperations = in.readVLong(); + this.numberOfFailedBulkOperations = in.readVLong(); + this.numberOfOperationsIndexed = in.readVLong(); + this.fetchExceptions = new TreeMap<>(in.readMap(StreamInput::readVLong, StreamInput::readException)); + this.timeSinceLastFetchMillis = in.readZLong(); + } + + @Override + public String getWriteableName() { + return STATUS_PARSER_NAME; + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeString(leaderIndex); + out.writeVInt(shardId); + out.writeZLong(leaderGlobalCheckpoint); + out.writeZLong(leaderMaxSeqNo); + out.writeZLong(followerGlobalCheckpoint); + out.writeZLong(followerMaxSeqNo); + out.writeZLong(lastRequestedSeqNo); + out.writeVInt(numberOfConcurrentReads); + out.writeVInt(numberOfConcurrentWrites); + out.writeVInt(numberOfQueuedWrites); + out.writeVLong(mappingVersion); + out.writeVLong(totalFetchTimeMillis); + out.writeVLong(numberOfSuccessfulFetches); + out.writeVLong(numberOfFailedFetches); + out.writeVLong(operationsReceived); + out.writeVLong(totalTransferredBytes); + out.writeVLong(totalIndexTimeMillis); + out.writeVLong(numberOfSuccessfulBulkOperations); + out.writeVLong(numberOfFailedBulkOperations); + out.writeVLong(numberOfOperationsIndexed); + out.writeMap(fetchExceptions, StreamOutput::writeVLong, StreamOutput::writeException); + out.writeZLong(timeSinceLastFetchMillis); + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + builder.startObject(); + { + builder.field(LEADER_INDEX.getPreferredName(), leaderIndex); + builder.field(SHARD_ID.getPreferredName(), shardId); + builder.field(LEADER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), leaderGlobalCheckpoint); + builder.field(LEADER_MAX_SEQ_NO_FIELD.getPreferredName(), leaderMaxSeqNo); + builder.field(FOLLOWER_GLOBAL_CHECKPOINT_FIELD.getPreferredName(), followerGlobalCheckpoint); + builder.field(FOLLOWER_MAX_SEQ_NO_FIELD.getPreferredName(), followerMaxSeqNo); + builder.field(LAST_REQUESTED_SEQ_NO_FIELD.getPreferredName(), lastRequestedSeqNo); + builder.field(NUMBER_OF_CONCURRENT_READS_FIELD.getPreferredName(), numberOfConcurrentReads); + builder.field(NUMBER_OF_CONCURRENT_WRITES_FIELD.getPreferredName(), numberOfConcurrentWrites); + builder.field(NUMBER_OF_QUEUED_WRITES_FIELD.getPreferredName(), numberOfQueuedWrites); + builder.field(MAPPING_VERSION_FIELD.getPreferredName(), 
mappingVersion); + builder.humanReadableField( + TOTAL_FETCH_TIME_MILLIS_FIELD.getPreferredName(), + "total_fetch_time", + new TimeValue(totalFetchTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(NUMBER_OF_SUCCESSFUL_FETCHES_FIELD.getPreferredName(), numberOfSuccessfulFetches); + builder.field(NUMBER_OF_FAILED_FETCHES_FIELD.getPreferredName(), numberOfFailedFetches); + builder.field(OPERATIONS_RECEIVED_FIELD.getPreferredName(), operationsReceived); + builder.humanReadableField( + TOTAL_TRANSFERRED_BYTES.getPreferredName(), + "total_transferred", + new ByteSizeValue(totalTransferredBytes, ByteSizeUnit.BYTES)); + builder.humanReadableField( + TOTAL_INDEX_TIME_MILLIS_FIELD.getPreferredName(), + "total_index_time", + new TimeValue(totalIndexTimeMillis, TimeUnit.MILLISECONDS)); + builder.field(NUMBER_OF_SUCCESSFUL_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfSuccessfulBulkOperations); + builder.field(NUMBER_OF_FAILED_BULK_OPERATIONS_FIELD.getPreferredName(), numberOfFailedBulkOperations); + builder.field(NUMBER_OF_OPERATIONS_INDEXED_FIELD.getPreferredName(), numberOfOperationsIndexed); + builder.startArray(FETCH_EXCEPTIONS.getPreferredName()); + { + for (final Map.Entry entry : fetchExceptions.entrySet()) { + builder.startObject(); + { + builder.field(FETCH_EXCEPTIONS_ENTRY_FROM_SEQ_NO.getPreferredName(), entry.getKey()); + builder.field(FETCH_EXCEPTIONS_ENTRY_EXCEPTION.getPreferredName()); + builder.startObject(); + { + ElasticsearchException.generateThrowableXContent(builder, params, entry.getValue()); + } + builder.endObject(); + } + builder.endObject(); + } + } + builder.endArray(); + builder.humanReadableField( + TIME_SINCE_LAST_FETCH_MILLIS_FIELD.getPreferredName(), + "time_since_last_fetch", + new TimeValue(timeSinceLastFetchMillis, TimeUnit.MILLISECONDS)); + } + builder.endObject(); + return builder; + } + + public static Status fromXContent(final XContentParser parser) { + return STATUS_PARSER.apply(parser, null); + } + + @Override + public boolean equals(final Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final Status that = (Status) o; + return leaderIndex.equals(that.leaderIndex) && + shardId == that.shardId && + leaderGlobalCheckpoint == that.leaderGlobalCheckpoint && + leaderMaxSeqNo == that.leaderMaxSeqNo && + followerGlobalCheckpoint == that.followerGlobalCheckpoint && + followerMaxSeqNo == that.followerMaxSeqNo && + lastRequestedSeqNo == that.lastRequestedSeqNo && + numberOfConcurrentReads == that.numberOfConcurrentReads && + numberOfConcurrentWrites == that.numberOfConcurrentWrites && + numberOfQueuedWrites == that.numberOfQueuedWrites && + mappingVersion == that.mappingVersion && + totalFetchTimeMillis == that.totalFetchTimeMillis && + numberOfSuccessfulFetches == that.numberOfSuccessfulFetches && + numberOfFailedFetches == that.numberOfFailedFetches && + operationsReceived == that.operationsReceived && + totalTransferredBytes == that.totalTransferredBytes && + numberOfSuccessfulBulkOperations == that.numberOfSuccessfulBulkOperations && + numberOfFailedBulkOperations == that.numberOfFailedBulkOperations && + numberOfOperationsIndexed == that.numberOfOperationsIndexed && + /* + * ElasticsearchException does not implement equals so we will assume the fetch exceptions are equal if they are equal + * up to the key set and their messages. Note that we are relying on the fact that the fetch exceptions are ordered by + * keys. 
+ */ + fetchExceptions.keySet().equals(that.fetchExceptions.keySet()) && + getFetchExceptionMessages(this).equals(getFetchExceptionMessages(that)) && + timeSinceLastFetchMillis == that.timeSinceLastFetchMillis; + } + + @Override + public int hashCode() { + return Objects.hash( + leaderIndex, + shardId, + leaderGlobalCheckpoint, + leaderMaxSeqNo, + followerGlobalCheckpoint, + followerMaxSeqNo, + lastRequestedSeqNo, + numberOfConcurrentReads, + numberOfConcurrentWrites, + numberOfQueuedWrites, + mappingVersion, + totalFetchTimeMillis, + numberOfSuccessfulFetches, + numberOfFailedFetches, + operationsReceived, + totalTransferredBytes, + numberOfSuccessfulBulkOperations, + numberOfFailedBulkOperations, + numberOfOperationsIndexed, + /* + * ElasticsearchException does not implement hash code so we will compute the hash code based on the key set and the + * messages. Note that we are relying on the fact that the fetch exceptions are ordered by keys. + */ + fetchExceptions.keySet(), + getFetchExceptionMessages(this), + timeSinceLastFetchMillis); + } + + private static List getFetchExceptionMessages(final Status status) { + return status.fetchExceptions().values().stream().map(ElasticsearchException::getMessage).collect(Collectors.toList()); + } + + public String toString() { + return Strings.toString(this); + } + + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java new file mode 100644 index 00000000000..82482792f39 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -0,0 +1,251 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +public class ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { + + public static final String NAME = "xpack/ccr/shard_follow_task"; + + // list of headers that will be stored when a job is created + public static final Set HEADER_FILTERS = + Collections.unmodifiableSet(new HashSet<>(Arrays.asList("es-security-runas-user", "_xpack_security_authentication"))); + + static final ParseField LEADER_CLUSTER_ALIAS_FIELD = new ParseField("leader_cluster_alias"); + static final ParseField FOLLOW_SHARD_INDEX_FIELD = new ParseField("follow_shard_index"); + static final ParseField FOLLOW_SHARD_INDEX_UUID_FIELD = new ParseField("follow_shard_index_uuid"); + static final ParseField FOLLOW_SHARD_SHARDID_FIELD = new ParseField("follow_shard_shard"); + static final ParseField LEADER_SHARD_INDEX_FIELD = new ParseField("leader_shard_index"); + static final ParseField LEADER_SHARD_INDEX_UUID_FIELD = new ParseField("leader_shard_index_uuid"); + static final ParseField LEADER_SHARD_SHARDID_FIELD = new ParseField("leader_shard_shard"); + static final ParseField HEADERS = new ParseField("headers"); + public static final ParseField MAX_BATCH_OPERATION_COUNT = new ParseField("max_batch_operation_count"); + public static final ParseField MAX_CONCURRENT_READ_BATCHES = new ParseField("max_concurrent_read_batches"); + public static final ParseField MAX_BATCH_SIZE_IN_BYTES = new ParseField("max_batch_size_in_bytes"); + public static final ParseField MAX_CONCURRENT_WRITE_BATCHES = new ParseField("max_concurrent_write_batches"); + public static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); + public static final ParseField RETRY_TIMEOUT = new ParseField("retry_timeout"); + public static final ParseField IDLE_SHARD_RETRY_DELAY = new ParseField("idle_shard_retry_delay"); + + @SuppressWarnings("unchecked") + private static ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + (a) -> new ShardFollowTask((String) a[0], new ShardId((String) a[1], (String) a[2], (int) a[3]), + new ShardId((String) a[4], (String) a[5], (int) a[6]), (int) a[7], (int) a[8], (long) a[9], + (int) a[10], (int) a[11], (TimeValue) a[12], (TimeValue) a[13], (Map) a[14])); + + static { + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), LEADER_CLUSTER_ALIAS_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_INDEX_UUID_FIELD); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), FOLLOW_SHARD_SHARDID_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), 
LEADER_SHARD_INDEX_FIELD); + PARSER.declareString(ConstructingObjectParser.constructorArg(), LEADER_SHARD_INDEX_UUID_FIELD); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), LEADER_SHARD_SHARDID_FIELD); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_BATCH_OPERATION_COUNT); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_CONCURRENT_READ_BATCHES); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MAX_BATCH_SIZE_IN_BYTES); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_CONCURRENT_WRITE_BATCHES); + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_WRITE_BUFFER_SIZE); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), RETRY_TIMEOUT.getPreferredName()), + RETRY_TIMEOUT, ObjectParser.ValueType.STRING); + PARSER.declareField(ConstructingObjectParser.constructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), IDLE_SHARD_RETRY_DELAY.getPreferredName()), + IDLE_SHARD_RETRY_DELAY, ObjectParser.ValueType.STRING); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), (p, c) -> p.mapStrings(), HEADERS); + } + + private final String leaderClusterAlias; + private final ShardId followShardId; + private final ShardId leaderShardId; + private final int maxBatchOperationCount; + private final int maxConcurrentReadBatches; + private final long maxBatchSizeInBytes; + private final int maxConcurrentWriteBatches; + private final int maxWriteBufferSize; + private final TimeValue retryTimeout; + private final TimeValue idleShardRetryDelay; + private final Map headers; + + ShardFollowTask(String leaderClusterAlias, ShardId followShardId, ShardId leaderShardId, int maxBatchOperationCount, + int maxConcurrentReadBatches, long maxBatchSizeInBytes, int maxConcurrentWriteBatches, + int maxWriteBufferSize, TimeValue retryTimeout, TimeValue idleShardRetryDelay, Map headers) { + this.leaderClusterAlias = leaderClusterAlias; + this.followShardId = followShardId; + this.leaderShardId = leaderShardId; + this.maxBatchOperationCount = maxBatchOperationCount; + this.maxConcurrentReadBatches = maxConcurrentReadBatches; + this.maxBatchSizeInBytes = maxBatchSizeInBytes; + this.maxConcurrentWriteBatches = maxConcurrentWriteBatches; + this.maxWriteBufferSize = maxWriteBufferSize; + this.retryTimeout = retryTimeout; + this.idleShardRetryDelay = idleShardRetryDelay; + this.headers = headers != null ? 
Collections.unmodifiableMap(headers) : Collections.emptyMap(); + } + + public ShardFollowTask(StreamInput in) throws IOException { + this.leaderClusterAlias = in.readOptionalString(); + this.followShardId = ShardId.readShardId(in); + this.leaderShardId = ShardId.readShardId(in); + this.maxBatchOperationCount = in.readVInt(); + this.maxConcurrentReadBatches = in.readVInt(); + this.maxBatchSizeInBytes = in.readVLong(); + this.maxConcurrentWriteBatches = in.readVInt(); + this.maxWriteBufferSize = in.readVInt(); + this.retryTimeout = in.readTimeValue(); + this.idleShardRetryDelay = in.readTimeValue(); + this.headers = Collections.unmodifiableMap(in.readMap(StreamInput::readString, StreamInput::readString)); + } + + public String getLeaderClusterAlias() { + return leaderClusterAlias; + } + + public ShardId getFollowShardId() { + return followShardId; + } + + public ShardId getLeaderShardId() { + return leaderShardId; + } + + public int getMaxBatchOperationCount() { + return maxBatchOperationCount; + } + + public int getMaxConcurrentReadBatches() { + return maxConcurrentReadBatches; + } + + public int getMaxConcurrentWriteBatches() { + return maxConcurrentWriteBatches; + } + + public int getMaxWriteBufferSize() { + return maxWriteBufferSize; + } + + public long getMaxBatchSizeInBytes() { + return maxBatchSizeInBytes; + } + + public TimeValue getRetryTimeout() { + return retryTimeout; + } + + public TimeValue getIdleShardRetryDelay() { + return idleShardRetryDelay; + } + + public String getTaskId() { + return followShardId.getIndex().getUUID() + "-" + followShardId.getId(); + } + + public Map getHeaders() { + return headers; + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(leaderClusterAlias); + followShardId.writeTo(out); + leaderShardId.writeTo(out); + out.writeVInt(maxBatchOperationCount); // kept symmetric with readVInt in the StreamInput constructor + out.writeVInt(maxConcurrentReadBatches); + out.writeVLong(maxBatchSizeInBytes); + out.writeVInt(maxConcurrentWriteBatches); + out.writeVInt(maxWriteBufferSize); + out.writeTimeValue(retryTimeout); + out.writeTimeValue(idleShardRetryDelay); + out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + } + + public static ShardFollowTask fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (leaderClusterAlias != null) { + builder.field(LEADER_CLUSTER_ALIAS_FIELD.getPreferredName(), leaderClusterAlias); + } + builder.field(FOLLOW_SHARD_INDEX_FIELD.getPreferredName(), followShardId.getIndex().getName()); + builder.field(FOLLOW_SHARD_INDEX_UUID_FIELD.getPreferredName(), followShardId.getIndex().getUUID()); + builder.field(FOLLOW_SHARD_SHARDID_FIELD.getPreferredName(), followShardId.id()); + builder.field(LEADER_SHARD_INDEX_FIELD.getPreferredName(), leaderShardId.getIndex().getName()); + builder.field(LEADER_SHARD_INDEX_UUID_FIELD.getPreferredName(), leaderShardId.getIndex().getUUID()); + builder.field(LEADER_SHARD_SHARDID_FIELD.getPreferredName(), leaderShardId.id()); + builder.field(MAX_BATCH_OPERATION_COUNT.getPreferredName(), maxBatchOperationCount); + builder.field(MAX_CONCURRENT_READ_BATCHES.getPreferredName(), maxConcurrentReadBatches); + builder.field(MAX_BATCH_SIZE_IN_BYTES.getPreferredName(), maxBatchSizeInBytes); + builder.field(MAX_CONCURRENT_WRITE_BATCHES.getPreferredName(),
maxConcurrentWriteBatches); + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize); + builder.field(RETRY_TIMEOUT.getPreferredName(), retryTimeout.getStringRep()); + builder.field(IDLE_SHARD_RETRY_DELAY.getPreferredName(), idleShardRetryDelay.getStringRep()); + builder.field(HEADERS.getPreferredName(), headers); + return builder.endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShardFollowTask that = (ShardFollowTask) o; + return Objects.equals(leaderClusterAlias, that.leaderClusterAlias) && + Objects.equals(followShardId, that.followShardId) && + Objects.equals(leaderShardId, that.leaderShardId) && + maxBatchOperationCount == that.maxBatchOperationCount && + maxConcurrentReadBatches == that.maxConcurrentReadBatches && + maxConcurrentWriteBatches == that.maxConcurrentWriteBatches && + maxBatchSizeInBytes == that.maxBatchSizeInBytes && + maxWriteBufferSize == that.maxWriteBufferSize && + Objects.equals(retryTimeout, that.retryTimeout) && + Objects.equals(idleShardRetryDelay, that.idleShardRetryDelay) && + Objects.equals(headers, that.headers); + } + + @Override + public int hashCode() { + return Objects.hash(leaderClusterAlias, followShardId, leaderShardId, maxBatchOperationCount, maxConcurrentReadBatches, + maxConcurrentWriteBatches, maxBatchSizeInBytes, maxWriteBufferSize, retryTimeout, idleShardRetryDelay, headers); + } + + public String toString() { + return Strings.toString(this); + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.V_6_4_0; + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java new file mode 100644 index 00000000000..83e3e4806e1 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -0,0 +1,207 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.Action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.support.ContextPreservingActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.FilterClient; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.persistent.AllocatedPersistentTask; +import org.elasticsearch.persistent.PersistentTaskState; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsAction; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsRequest; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; + +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.LongConsumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +public class ShardFollowTasksExecutor extends PersistentTasksExecutor { + + private final Client client; + private final ThreadPool threadPool; + + public ShardFollowTasksExecutor(Settings settings, Client client, ThreadPool threadPool) { + super(settings, ShardFollowTask.NAME, Ccr.CCR_THREAD_POOL_NAME); + this.client = client; + this.threadPool = threadPool; + } + + @Override + public void validate(ShardFollowTask params, ClusterState clusterState) { + if (params.getLeaderClusterAlias() == null) { + // We can only validate the IndexRoutingTable of the local cluster; + // validating a remote leader would require a remote call, which we cannot make here.
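+ // Editor's note: the routing-table lookups in this method assume both indices are present in the local + // cluster state; getRoutingTable().index(...) returns null for an unknown index, so a missing index would + // surface here as a NullPointerException rather than a descriptive validation error.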
+ IndexRoutingTable routingTable = clusterState.getRoutingTable().index(params.getLeaderShardId().getIndex()); + if (routingTable.shard(params.getLeaderShardId().id()).primaryShard().started() == false) { + throw new IllegalArgumentException("Not all copies of leader shard are started"); + } + } + + IndexRoutingTable routingTable = clusterState.getRoutingTable().index(params.getFollowShardId().getIndex()); + if (routingTable.shard(params.getFollowShardId().id()).primaryShard().started() == false) { + throw new IllegalArgumentException("Not all copies of follow shard are started"); + } + } + + @Override + protected AllocatedPersistentTask createTask(long id, String type, String action, TaskId parentTaskId, + PersistentTasksCustomMetaData.PersistentTask taskInProgress, + Map headers) { + ShardFollowTask params = taskInProgress.getParams(); + final Client leaderClient; + if (params.getLeaderClusterAlias() != null) { + leaderClient = wrapClient(client.getRemoteClusterClient(params.getLeaderClusterAlias()), params); + } else { + leaderClient = wrapClient(client, params); + } + Client followerClient = wrapClient(client, params); + BiConsumer scheduler = + (delay, command) -> threadPool.schedule(delay, Ccr.CCR_THREAD_POOL_NAME, command); + return new ShardFollowNodeTask( + id, type, action, getDescription(taskInProgress), parentTaskId, headers, params, scheduler, System::nanoTime) { + + @Override + protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { + Index leaderIndex = params.getLeaderShardId().getIndex(); + Index followIndex = params.getFollowShardId().getIndex(); + + ClusterStateRequest clusterStateRequest = new ClusterStateRequest(); + clusterStateRequest.clear(); + clusterStateRequest.metaData(true); + clusterStateRequest.indices(leaderIndex.getName()); + + leaderClient.admin().cluster().state(clusterStateRequest, ActionListener.wrap(clusterStateResponse -> { + IndexMetaData indexMetaData = clusterStateResponse.getState().metaData().getIndexSafe(leaderIndex); + assert indexMetaData.getMappings().size() == 1 : "expected exactly one mapping, but got [" + + indexMetaData.getMappings().size() + "]"; + MappingMetaData mappingMetaData = indexMetaData.getMappings().iterator().next().value; + + PutMappingRequest putMappingRequest = new PutMappingRequest(followIndex.getName()); + putMappingRequest.type(mappingMetaData.type()); + putMappingRequest.source(mappingMetaData.source().string(), XContentType.JSON); + followerClient.admin().indices().putMapping(putMappingRequest, ActionListener.wrap( + putMappingResponse -> handler.accept(indexMetaData.getMappingVersion()), + errorHandler)); + }, errorHandler)); + } + + @Override + protected void innerSendBulkShardOperationsRequest( + final List operations, + final Consumer handler, + final Consumer errorHandler) { + final BulkShardOperationsRequest request = new BulkShardOperationsRequest(params.getFollowShardId(), operations); + followerClient.execute(BulkShardOperationsAction.INSTANCE, request, + ActionListener.wrap(response -> handler.accept(response), errorHandler)); + } + + @Override + protected void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer handler, + Consumer errorHandler) { + ShardChangesAction.Request request = new ShardChangesAction.Request(params.getLeaderShardId()); + request.setFromSeqNo(from); + request.setMaxOperationCount(maxOperationCount); + request.setMaxOperationSizeInBytes(params.getMaxBatchSizeInBytes()); + leaderClient.execute(ShardChangesAction.INSTANCE, request, 
ActionListener.wrap(handler::accept, errorHandler)); + } + }; + } + + interface BiLongConsumer { + void accept(long x, long y); + } + + @Override + protected void nodeOperation(final AllocatedPersistentTask task, final ShardFollowTask params, final PersistentTaskState state) { + Client followerClient = wrapClient(client, params); + ShardFollowNodeTask shardFollowNodeTask = (ShardFollowNodeTask) task; + logger.info("{} Started to track leader shard {}", params.getFollowShardId(), params.getLeaderShardId()); + fetchGlobalCheckpoint(followerClient, params.getFollowShardId(), + (followerGCP, maxSeqNo) -> shardFollowNodeTask.start(followerGCP, maxSeqNo, followerGCP, maxSeqNo), task::markAsFailed); + } + + private void fetchGlobalCheckpoint( + final Client client, + final ShardId shardId, + final BiLongConsumer handler, + final Consumer errorHandler) { + client.admin().indices().stats(new IndicesStatsRequest().indices(shardId.getIndexName()), ActionListener.wrap(r -> { + IndexStats indexStats = r.getIndex(shardId.getIndexName()); + Optional filteredShardStats = Arrays.stream(indexStats.getShards()) + .filter(shardStats -> shardStats.getShardRouting().shardId().equals(shardId)) + .filter(shardStats -> shardStats.getShardRouting().primary()) + .findAny(); + if (filteredShardStats.isPresent()) { + final SeqNoStats seqNoStats = filteredShardStats.get().getSeqNoStats(); + final long globalCheckpoint = seqNoStats.getGlobalCheckpoint(); + final long maxSeqNo = seqNoStats.getMaxSeqNo(); + handler.accept(globalCheckpoint, maxSeqNo); + } else { + errorHandler.accept(new IllegalArgumentException("Cannot find shard stats for shard " + shardId)); + } + }, errorHandler)); + } + + private static Client wrapClient(Client client, ShardFollowTask shardFollowTask) { + if (shardFollowTask.getHeaders().isEmpty()) { + return client; + } else { + final ThreadContext threadContext = client.threadPool().getThreadContext(); + Map filteredHeaders = shardFollowTask.getHeaders().entrySet().stream() + .filter(e -> ShardFollowTask.HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + return new FilterClient(client) { + @Override + protected + void doExecute(Action action, Request request, ActionListener listener) { + final Supplier supplier = threadContext.newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithHeaders(threadContext, filteredHeaders)) { + super.doExecute(action, request, new ContextPreservingActionListener<>(supplier, listener)); + } + } + }; + } + } + + private static ThreadContext.StoredContext stashWithHeaders(ThreadContext threadContext, Map headers) { + final ThreadContext.StoredContext storedContext = threadContext.stashContext(); + threadContext.copyHeaders(headers.entrySet()); + return storedContext; + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java new file mode 100644 index 00000000000..33873201f5f --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportCcrStatsAction.java @@ -0,0 +1,111 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.ccr.action;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.TaskOperationFailure;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.tasks.TransportTasksAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.license.LicenseUtils;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.ccr.Ccr;
+import org.elasticsearch.xpack.ccr.CcrLicenseChecker;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.function.Consumer;
+
+public class TransportCcrStatsAction extends TransportTasksAction<
+        ShardFollowNodeTask,
+        CcrStatsAction.TasksRequest,
+        CcrStatsAction.TasksResponse, CcrStatsAction.TaskResponse> {
+
+    private final IndexNameExpressionResolver resolver;
+    private final CcrLicenseChecker ccrLicenseChecker;
+
+    @Inject
+    public TransportCcrStatsAction(
+            final Settings settings,
+            final ClusterService clusterService,
+            final TransportService transportService,
+            final ActionFilters actionFilters,
+            final IndexNameExpressionResolver resolver,
+            final CcrLicenseChecker ccrLicenseChecker) {
+        super(
+                settings,
+                CcrStatsAction.NAME,
+                clusterService,
+                transportService,
+                actionFilters,
+                CcrStatsAction.TasksRequest::new,
+                CcrStatsAction.TasksResponse::new,
+                Ccr.CCR_THREAD_POOL_NAME);
+        this.resolver = Objects.requireNonNull(resolver);
+        this.ccrLicenseChecker = Objects.requireNonNull(ccrLicenseChecker);
+    }
+
+    @Override
+    protected void doExecute(
+            final Task task,
+            final CcrStatsAction.TasksRequest request,
+            final ActionListener<CcrStatsAction.TasksResponse> listener) {
+        if (ccrLicenseChecker.isCcrAllowed()) {
+            super.doExecute(task, request, listener);
+        } else {
+            listener.onFailure(LicenseUtils.newComplianceException("ccr"));
+        }
+    }
+
+    @Override
+    protected CcrStatsAction.TasksResponse newResponse(
+            final CcrStatsAction.TasksRequest request,
+            final List<CcrStatsAction.TaskResponse> taskResponses,
+            final List<TaskOperationFailure> taskOperationFailures,
+            final List<FailedNodeException> failedNodeExceptions) {
+        return new CcrStatsAction.TasksResponse(taskOperationFailures, failedNodeExceptions, taskResponses);
+    }
+
+    @Override
+    protected CcrStatsAction.TaskResponse readTaskResponse(final StreamInput in) throws IOException {
+        return new CcrStatsAction.TaskResponse(in);
+    }
+
+    @Override
+    protected void processTasks(final CcrStatsAction.TasksRequest request, final Consumer<ShardFollowNodeTask> operation) {
+        final ClusterState state = clusterService.state();
+        final Set<String> concreteIndices = new HashSet<>(Arrays.asList(resolver.concreteIndexNames(state, request)));
+        for (final Task task : taskManager.getTasks().values()) {
+            if (task instanceof ShardFollowNodeTask) {
+                final ShardFollowNodeTask shardFollowNodeTask = (ShardFollowNodeTask) task;
+                if (concreteIndices.contains(shardFollowNodeTask.getFollowShardId().getIndexName())) {
+                    operation.accept(shardFollowNodeTask);
+                }
+            }
+        }
+    }
+
+    @Override
+    protected void taskOperation(
+            final CcrStatsAction.TasksRequest request,
+            final ShardFollowNodeTask task,
+            final ActionListener<CcrStatsAction.TaskResponse> listener) {
+        listener.onResponse(new CcrStatsAction.TaskResponse(task.getFollowShardId(), task.getStatus()));
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java
new file mode 100644
index 00000000000..93b2bcc3e40
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/UnfollowIndexAction.java
@@ -0,0 +1,152 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ccr.action;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
+import org.elasticsearch.persistent.PersistentTasksService;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReferenceArray;
+
+public class UnfollowIndexAction extends Action<AcknowledgedResponse> {
+
+    public static final UnfollowIndexAction INSTANCE = new UnfollowIndexAction();
+    public static final String NAME = "cluster:admin/xpack/ccr/unfollow_index";
+
+    private UnfollowIndexAction() {
+        super(NAME);
+    }
+
+    @Override
+    public AcknowledgedResponse newResponse() {
+        return new AcknowledgedResponse();
+    }
+
+    public static class Request extends ActionRequest {
+
+        private String followIndex;
+
+        public String getFollowIndex() {
+            return followIndex;
+        }
+
+        public void setFollowIndex(String followIndex) {
+            this.followIndex = followIndex;
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return null;
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            super.readFrom(in);
+            followIndex = in.readString();
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeString(followIndex);
+        }
+    }
+
+    public static class TransportAction extends HandledTransportAction<Request, AcknowledgedResponse> {
+
+        private final Client client;
+        private final PersistentTasksService persistentTasksService;
+
+        @Inject
+        public TransportAction(Settings settings,
+                               TransportService transportService,
+                               ActionFilters actionFilters,
+                               Client client,
+                               PersistentTasksService persistentTasksService) {
+            super(settings, NAME, transportService, actionFilters, Request::new);
+            this.client = client;
+            this.persistentTasksService = persistentTasksService;
+        }
+
+        @Override
+        protected void doExecute(Task task,
+                                 Request request,
+                                 ActionListener<AcknowledgedResponse> listener) {
+
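+            // Descriptive note: the unfollow flow below fans out. The cluster state is fetched to
+            // resolve the follower index, one persistent-task removal request is sent per follower
+            // shard, and each outcome (task or exception) is recorded in the responses array; the
+            // listener completes (or fails with suppressed exceptions) once the counter hits zero.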
+            client.admin().cluster().state(new ClusterStateRequest(), ActionListener.wrap(r -> {
+                IndexMetaData followIndexMetadata = r.getState().getMetaData().index(request.followIndex);
+                if (followIndexMetadata == null) {
+                    listener.onFailure(new IllegalArgumentException("follow index [" + request.followIndex + "] does not exist"));
+                    return;
+                }
+
+                final int numShards = followIndexMetadata.getNumberOfShards();
+                final AtomicInteger counter = new AtomicInteger(numShards);
+                final AtomicReferenceArray<Object> responses = new AtomicReferenceArray<>(followIndexMetadata.getNumberOfShards());
+                for (int i = 0; i < numShards; i++) {
+                    final int shardId = i;
+                    String taskId = followIndexMetadata.getIndexUUID() + "-" + shardId;
+                    persistentTasksService.sendRemoveRequest(taskId,
+                            new ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
+                                @Override
+                                public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> task) {
+                                    responses.set(shardId, task);
+                                    finalizeResponse();
+                                }
+
+                                @Override
+                                public void onFailure(Exception e) {
+                                    responses.set(shardId, e);
+                                    finalizeResponse();
+                                }
+
+                                void finalizeResponse() {
+                                    Exception error = null;
+                                    if (counter.decrementAndGet() == 0) {
+                                        for (int j = 0; j < responses.length(); j++) {
+                                            Object response = responses.get(j);
+                                            if (response instanceof Exception) {
+                                                if (error == null) {
+                                                    error = (Exception) response;
+                                                } else {
+                                                    error.addSuppressed((Throwable) response);
+                                                }
+                                            }
+                                        }
+
+                                        if (error == null) {
+                                            // include task ids?
+                                            listener.onResponse(new AcknowledgedResponse(true));
+                                        } else {
+                                            // TODO: cancel all started tasks
+                                            listener.onFailure(error);
+                                        }
+                                    }
+                                }
+                            });
+                }
+            }, listener::onFailure));
+        }
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java
new file mode 100644
index 00000000000..a85e5c50e84
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsAction.java
@@ -0,0 +1,24 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ccr.action.bulk;
+
+import org.elasticsearch.action.Action;
+
+public class BulkShardOperationsAction extends Action<BulkShardOperationsResponse> {
+
+    public static final BulkShardOperationsAction INSTANCE = new BulkShardOperationsAction();
+    public static final String NAME = "indices:data/write/bulk_shard_operations[s]";
+
+    private BulkShardOperationsAction() {
+        super(NAME);
+    }
+
+    @Override
+    public BulkShardOperationsResponse newResponse() {
+        return new BulkShardOperationsResponse();
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java
new file mode 100644
index 00000000000..c28789fb580
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java
@@ -0,0 +1,60 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ccr.action.bulk;
+
+import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+
+import java.io.IOException;
+import java.util.List;
+
+public final class BulkShardOperationsRequest extends ReplicatedWriteRequest<BulkShardOperationsRequest> {
+
+    private List<Translog.Operation> operations;
+
+    public BulkShardOperationsRequest() {
+    }
+
+    public BulkShardOperationsRequest(final ShardId shardId, final List<Translog.Operation> operations) {
+        super(shardId);
+        setRefreshPolicy(RefreshPolicy.NONE);
+        this.operations = operations;
+    }
+
+    public List<Translog.Operation> getOperations() {
+        return operations;
+    }
+
+    @Override
+    public void readFrom(final StreamInput in) throws IOException {
+        super.readFrom(in);
+        operations = in.readList(Translog.Operation::readOperation);
+    }
+
+    @Override
+    public void writeTo(final StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeVInt(operations.size());
+        for (Translog.Operation operation : operations) {
+            Translog.Operation.writeOperation(out, operation);
+        }
+    }
+
+    @Override
+    public String toString() {
+        return "BulkShardOperationsRequest{" +
+                "operations=" + operations.size() +
+                ", shardId=" + shardId +
+                ", timeout=" + timeout +
+                ", index='" + index + '\'' +
+                ", waitForActiveShards=" + waitForActiveShards +
+                '}';
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java
new file mode 100644
index 00000000000..0c72f02fde1
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsResponse.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ccr.action.bulk; + +import org.elasticsearch.action.support.WriteResponse; +import org.elasticsearch.action.support.replication.ReplicationResponse; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +public final class BulkShardOperationsResponse extends ReplicationResponse implements WriteResponse { + + private long globalCheckpoint; + + public long getGlobalCheckpoint() { + return globalCheckpoint; + } + + public void setGlobalCheckpoint(final long globalCheckpoint) { + this.globalCheckpoint = globalCheckpoint; + } + + private long maxSeqNo; + + public long getMaxSeqNo() { + return maxSeqNo; + } + + public void setMaxSeqNo(final long maxSeqNo) { + this.maxSeqNo = maxSeqNo; + } + + public BulkShardOperationsResponse() { + } + + @Override + public void setForcedRefresh(final boolean forcedRefresh) { + } + + @Override + public void readFrom(final StreamInput in) throws IOException { + super.readFrom(in); + globalCheckpoint = in.readZLong(); + maxSeqNo = in.readZLong(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + super.writeTo(out); + out.writeZLong(globalCheckpoint); + out.writeZLong(maxSeqNo); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java new file mode 100644 index 00000000000..4b4bce1fcce --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/TransportBulkShardOperationsAction.java @@ -0,0 +1,158 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+
+package org.elasticsearch.xpack.ccr.action.bulk;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.replication.TransportWriteAction;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.seqno.SeqNoStats;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class TransportBulkShardOperationsAction
+        extends TransportWriteAction<BulkShardOperationsRequest, BulkShardOperationsRequest, BulkShardOperationsResponse> {
+
+    @Inject
+    public TransportBulkShardOperationsAction(
+            final Settings settings,
+            final TransportService transportService,
+            final ClusterService clusterService,
+            final IndicesService indicesService,
+            final ThreadPool threadPool,
+            final ShardStateAction shardStateAction,
+            final ActionFilters actionFilters,
+            final IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(
+                settings,
+                BulkShardOperationsAction.NAME,
+                transportService,
+                clusterService,
+                indicesService,
+                threadPool,
+                shardStateAction,
+                actionFilters,
+                indexNameExpressionResolver,
+                BulkShardOperationsRequest::new,
+                BulkShardOperationsRequest::new,
+                ThreadPool.Names.WRITE);
+    }
+
+    @Override
+    protected WritePrimaryResult<BulkShardOperationsRequest, BulkShardOperationsResponse> shardOperationOnPrimary(
+            final BulkShardOperationsRequest request, final IndexShard primary) throws Exception {
+        return shardOperationOnPrimary(request.shardId(), request.getOperations(), primary, logger);
+    }
+
+    // public for testing purposes only
+    public static WritePrimaryResult<BulkShardOperationsRequest, BulkShardOperationsResponse> shardOperationOnPrimary(
+            final ShardId shardId,
+            final List<Translog.Operation> sourceOperations,
+            final IndexShard primary,
+            final Logger logger) throws IOException {
+        final List<Translog.Operation> targetOperations = sourceOperations.stream().map(operation -> {
+            final Translog.Operation operationWithPrimaryTerm;
+            switch (operation.opType()) {
+                case INDEX:
+                    final Translog.Index index = (Translog.Index) operation;
+                    operationWithPrimaryTerm = new Translog.Index(
+                            index.type(),
+                            index.id(),
+                            index.seqNo(),
+                            primary.getOperationPrimaryTerm(),
+                            index.version(),
+                            BytesReference.toBytes(index.source()),
+                            index.routing(),
+                            index.getAutoGeneratedIdTimestamp());
+                    break;
+                case DELETE:
+                    final Translog.Delete delete = (Translog.Delete) operation;
+                    operationWithPrimaryTerm = new Translog.Delete(
+                            delete.type(),
+                            delete.id(),
+                            delete.uid(),
+                            delete.seqNo(),
+                            primary.getOperationPrimaryTerm(),
+                            delete.version());
+                    break;
+                case NO_OP:
+                    final Translog.NoOp noOp = (Translog.NoOp) operation;
+                    operationWithPrimaryTerm = new Translog.NoOp(noOp.seqNo(), primary.getOperationPrimaryTerm(), noOp.reason());
+                    break;
+                default:
+                    throw new IllegalStateException("unexpected operation type [" + operation.opType() + "]");
+            }
+            return operationWithPrimaryTerm;
+        }).collect(Collectors.toList());
+        final Translog.Location location = applyTranslogOperations(targetOperations, primary, Engine.Operation.Origin.PRIMARY);
+        final BulkShardOperationsRequest replicaRequest = new BulkShardOperationsRequest(shardId, targetOperations);
+        return new CcrWritePrimaryResult(replicaRequest, location, primary, logger);
+    }
+
+    @Override
+    protected WriteReplicaResult<BulkShardOperationsRequest> shardOperationOnReplica(
+            final BulkShardOperationsRequest request, final IndexShard replica) throws Exception {
+        final Translog.Location location = applyTranslogOperations(request.getOperations(), replica, Engine.Operation.Origin.REPLICA);
+        return new WriteReplicaResult<>(request, location, null, replica, logger);
+    }
+
+    // public for testing purposes only
+    public static Translog.Location applyTranslogOperations(
+            final List<Translog.Operation> operations, final IndexShard shard, final Engine.Operation.Origin origin) throws IOException {
+        Translog.Location location = null;
+        for (final Translog.Operation operation : operations) {
+            final Engine.Result result = shard.applyTranslogOperation(operation, origin);
+            assert result.getSeqNo() == operation.seqNo();
+            assert result.getResultType() == Engine.Result.Type.SUCCESS;
+            location = locationToSync(location, result.getTranslogLocation());
+        }
+        assert operations.size() == 0 || location != null;
+        return location;
+    }
+
+    @Override
+    protected BulkShardOperationsResponse newResponseInstance() {
+        return new BulkShardOperationsResponse();
+    }
+
+    /**
+     * Custom write result to include global checkpoint after ops have been replicated.
+     */
+    static class CcrWritePrimaryResult extends WritePrimaryResult<BulkShardOperationsRequest, BulkShardOperationsResponse> {
+
+        CcrWritePrimaryResult(BulkShardOperationsRequest request, Translog.Location location, IndexShard primary, Logger logger) {
+            super(request, new BulkShardOperationsResponse(), location, null, primary, logger);
+        }
+
+        @Override
+        public synchronized void respond(ActionListener<BulkShardOperationsResponse> listener) {
+            final BulkShardOperationsResponse response = finalResponseIfSuccessful;
+            final SeqNoStats seqNoStats = primary.seqNoStats();
+            // return a fresh global checkpoint after the operations have been replicated for the shard follow task
+            response.setGlobalCheckpoint(seqNoStats.getGlobalCheckpoint());
+            response.setMaxSeqNo(seqNoStats.getMaxSeqNo());
+            listener.onResponse(response);
+        }
+
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java
new file mode 100644
index 00000000000..24ada3755cb
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngine.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.ccr.index.engine;
+
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.InternalEngine;
+import org.elasticsearch.index.seqno.SequenceNumbers;
+import org.elasticsearch.xpack.ccr.CcrSettings;
+
+import java.io.IOException;
+
+/**
+ * An engine implementation for following shards.
+ */
+public final class FollowingEngine extends InternalEngine {
+
+    /**
+     * Construct a new following engine with the specified engine configuration.
+     *
+     * @param engineConfig the engine configuration
+     */
+    FollowingEngine(final EngineConfig engineConfig) {
+        super(validateEngineConfig(engineConfig));
+    }
+
+    private static EngineConfig validateEngineConfig(final EngineConfig engineConfig) {
+        if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(engineConfig.getIndexSettings().getSettings()) == false) {
+            throw new IllegalArgumentException("a following engine can not be constructed for a non-following index");
+        }
+        return engineConfig;
+    }
+
+    private void preFlight(final Operation operation) {
+        /*
+         * We assert here so that this goes uncaught in unit tests and fails nodes in standalone tests (we want a harsh failure so that we
+         * do not have a situation where a shard fails and is recovered elsewhere and a test subsequently passes). We throw an exception so
+         * that we also prevent issues in production code.
+         */
+        assert operation.seqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO;
+        if (operation.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO) {
+            throw new IllegalStateException("a following engine does not accept operations without an assigned sequence number");
+        }
+        assert (operation.origin() == Operation.Origin.PRIMARY) == (operation.versionType() == VersionType.EXTERNAL) :
+                "invalid version_type in a following engine; version_type=" + operation.versionType() + ", origin=" + operation.origin();
+    }
+
+    @Override
+    protected InternalEngine.IndexingStrategy indexingStrategyForOperation(final Index index) throws IOException {
+        preFlight(index);
+        return planIndexingAsNonPrimary(index);
+    }
+
+    @Override
+    protected InternalEngine.DeletionStrategy deletionStrategyForOperation(final Delete delete) throws IOException {
+        preFlight(delete);
+        return planDeletionAsNonPrimary(delete);
+    }
+
+    @Override
+    public int fillSeqNoGaps(long primaryTerm) throws IOException {
+        // a no-op implementation, because a following shard does not own its history; the leader shard does
+        return 0;
+    }
+
+    @Override
+    protected boolean assertPrimaryIncomingSequenceNumber(final Operation.Origin origin, final long seqNo) {
+        // the sequence number should be set when the operation origin is primary
+        assert seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO : "primary operations on a following index must have an assigned sequence number";
+        return true;
+    }
+
+    @Override
+    protected boolean assertNonPrimaryOrigin(final Operation operation) {
+        return true;
+    }
+
+    @Override
+    protected boolean assertPrimaryCanOptimizeAddDocument(final Index index) {
+        assert index.version() == 1 && index.versionType() == VersionType.EXTERNAL
+                : "version [" + index.version() + "], type [" + index.versionType() + "]";
+        return true;
+    }
+
+}
diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineFactory.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineFactory.java
new file mode 100644
index 00000000000..ab76d02c66e
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineFactory.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ccr.index.engine; + +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineFactory; + +/** + * An engine factory for following engines. + */ +public final class FollowingEngineFactory implements EngineFactory { + + @Override + public Engine newReadWriteEngine(final EngineConfig config) { + return new FollowingEngine(config); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java new file mode 100644 index 00000000000..df34fd6cd45 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.ccr.action.CcrStatsAction; + +import java.io.IOException; + +public class RestCcrStatsAction extends BaseRestHandler { + + public RestCcrStatsAction(final Settings settings, final RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.GET, "/_ccr/stats", this); + controller.registerHandler(RestRequest.Method.GET, "/_ccr/stats/{index}", this); + } + + @Override + public String getName() { + return "ccr_stats"; + } + + @Override + protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) throws IOException { + final CcrStatsAction.TasksRequest request = new CcrStatsAction.TasksRequest(); + request.setIndices(Strings.splitStringByCommaToArray(restRequest.param("index"))); + request.setIndicesOptions(IndicesOptions.fromRequest(restRequest, request.indicesOptions())); + return channel -> client.execute(CcrStatsAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } + +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java new file mode 100644 index 00000000000..4d9079b36c9 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCreateAndFollowIndexAction.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction.Request; + +public class RestCreateAndFollowIndexAction extends BaseRestHandler { + + public RestCreateAndFollowIndexAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_ccr/create_and_follow", this); + } + + @Override + public String getName() { + return "ccr_create_and_follow_index_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + Request request = new Request(RestFollowIndexAction.createRequest(restRequest)); + return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java new file mode 100644 index 00000000000..88f5b74f4b1 --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowIndexAction.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; + +import java.io.IOException; + +import static org.elasticsearch.xpack.ccr.action.FollowIndexAction.INSTANCE; +import static org.elasticsearch.xpack.ccr.action.FollowIndexAction.Request; + +public class RestFollowIndexAction extends BaseRestHandler { + + public RestFollowIndexAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_ccr/follow", this); + } + + @Override + public String getName() { + return "ccr_follow_index_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { + Request request = createRequest(restRequest); + return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel)); + } + + static Request createRequest(RestRequest restRequest) throws IOException { + try (XContentParser parser = restRequest.contentOrSourceParamParser()) { + return Request.fromXContent(parser, restRequest.param("index")); + } + } +} diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java new file mode 100644 index 00000000000..2df6c77379b --- /dev/null +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestUnfollowIndexAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.ccr.rest;
+
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestToXContentListener;
+
+import java.io.IOException;
+
+import static org.elasticsearch.xpack.ccr.action.UnfollowIndexAction.INSTANCE;
+import static org.elasticsearch.xpack.ccr.action.UnfollowIndexAction.Request;
+
+public class RestUnfollowIndexAction extends BaseRestHandler {
+
+    public RestUnfollowIndexAction(Settings settings, RestController controller) {
+        super(settings);
+        controller.registerHandler(RestRequest.Method.POST, "/{index}/_ccr/unfollow", this);
+    }
+
+    @Override
+    public String getName() {
+        return "ccr_unfollow_index_action";
+    }
+
+    @Override
+    protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException {
+        Request request = new Request();
+        request.setFollowIndex(restRequest.param("index"));
+        return channel -> client.execute(INSTANCE, request, new RestToXContentListener<>(channel));
+    }
+}
diff --git a/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy b/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 00000000000..45d92fd2b8a
--- /dev/null
+++ b/x-pack/plugin/ccr/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,50 @@
+grant {
+  // needed because of problems in unbound LDAP library
+  permission java.util.PropertyPermission "*", "read,write";
+
+  // required to configure the custom mailcap for watcher
+  permission java.lang.RuntimePermission "setFactory";
+
+  // needed when sending emails for javax.activation
+  // otherwise a classnotfound exception is thrown due to trying
+  // to load the class with the application class loader
+  permission java.lang.RuntimePermission "setContextClassLoader";
+  permission java.lang.RuntimePermission "getClassLoader";
+  // TODO: remove use of this jar as soon as possible!!!!
+  permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
+
+  // bouncy castle
+  permission java.security.SecurityPermission "putProviderProperty.BC";
+
+  // needed for x-pack security extension
+  permission java.security.SecurityPermission "createPolicy.JavaPolicy";
+  permission java.security.SecurityPermission "getPolicy";
+  permission java.security.SecurityPermission "setPolicy";
+
+  // needed for multiple server implementations used in tests
+  permission java.net.SocketPermission "*", "accept,connect";
+
+  // needed for Windows named pipes in machine learning
+  permission java.io.FilePermission "\\\\.\\pipe\\*", "read,write";
+};
+
+grant codeBase "${codebase.netty-common}" {
+  // for reading the system-wide configuration for the backlog of established sockets
+  permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read";
+};
+
+grant codeBase "${codebase.netty-transport}" {
+  // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854
+  // the bug says it only happened rarely, and that it's fixed, but apparently it still happens rarely!
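+  // (descriptive note: under the Java security manager, writing a system property such as this one
+  // requires an explicit write PropertyPermission, hence the grant below)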
+ permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; +}; + +grant codeBase "${codebase.elasticsearch-rest-client}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; + +grant codeBase "${codebase.httpasyncclient}" { + // rest client uses system properties which gets the default proxy + permission java.net.NetPermission "getProxySelector"; +}; \ No newline at end of file diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java new file mode 100644 index 00000000000..675758903bf --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -0,0 +1,113 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.xpack.ccr.action.CcrStatsAction; +import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; + +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.CountDownLatch; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class CcrLicenseIT extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return Collections.singletonList(IncompatibleLicenseLocalStateCcr.class); + } + + public void testThatFollowingIndexIsUnavailableWithIncompatibleLicense() throws InterruptedException { + final FollowIndexAction.Request followRequest = getFollowRequest(); + final CountDownLatch latch = new CountDownLatch(1); + client().execute( + FollowIndexAction.INSTANCE, + followRequest, + new ActionListener() { + @Override + public void onResponse(final AcknowledgedResponse response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + assertIncompatibleLicense(e); + latch.countDown(); + } + }); + latch.await(); + } + + public void testThatCreateAndFollowingIndexIsUnavailableWithIncompatibleLicense() throws InterruptedException { + final FollowIndexAction.Request followRequest = getFollowRequest(); + final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest); + final CountDownLatch latch = new CountDownLatch(1); + client().execute( + CreateAndFollowIndexAction.INSTANCE, + createAndFollowRequest, + new ActionListener() { + @Override + public void onResponse(final CreateAndFollowIndexAction.Response response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + assertIncompatibleLicense(e); + latch.countDown(); + } + }); + latch.await(); + } + + public void testThatCcrStatsAreUnavailableWithIncompatibleLicense() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + client().execute(CcrStatsAction.INSTANCE, 
new CcrStatsAction.TasksRequest(), new ActionListener() { + @Override + public void onResponse(final CcrStatsAction.TasksResponse tasksResponse) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + assertIncompatibleLicense(e); + latch.countDown(); + } + }); + + latch.await(); + } + + private void assertIncompatibleLicense(final Exception e) { + assertThat(e, instanceOf(ElasticsearchSecurityException.class)); + assertThat(e.getMessage(), equalTo("current license is non-compliant for [ccr]")); + } + + private FollowIndexAction.Request getFollowRequest() { + return new FollowIndexAction.Request( + "leader", + "follower", + ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, + ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, + ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + TimeValue.timeValueMillis(10), + TimeValue.timeValueMillis(10)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java new file mode 100644 index 00000000000..0a9ca00590b --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrTests.java @@ -0,0 +1,55 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ccr.Ccr; +import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; + +import java.io.IOException; +import java.util.Optional; + +import static org.hamcrest.Matchers.instanceOf; + +public class CcrTests extends ESTestCase { + + public void testGetEngineFactory() throws IOException { + final Boolean[] values = new Boolean[] { true, false, null }; + for (final Boolean value : values) { + final String indexName = "following-" + value; + final Index index = new Index(indexName, UUIDs.randomBase64UUID()); + final Settings.Builder builder = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()); + if (value != null) { + builder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), value); + } + + final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName()) + .settings(builder.build()) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + final Ccr ccr = new Ccr(Settings.EMPTY, new CcrLicenseChecker(() -> true)); + final Optional engineFactory = ccr.getEngineFactory(new IndexSettings(indexMetaData, Settings.EMPTY)); + if (value != null && value) { + assertTrue(engineFactory.isPresent()); + assertThat(engineFactory.get(), instanceOf(FollowingEngineFactory.class)); + } else { + assertFalse(engineFactory.isPresent()); + } + } + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IncompatibleLicenseLocalStateCcr.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IncompatibleLicenseLocalStateCcr.java new file mode 100644 index 00000000000..c4b765d3c65 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IncompatibleLicenseLocalStateCcr.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.nio.file.Path; + +public class IncompatibleLicenseLocalStateCcr extends LocalStateCompositeXPackPlugin { + + public IncompatibleLicenseLocalStateCcr(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new Ccr(settings, new CcrLicenseChecker(() -> false)) { + + @Override + protected XPackLicenseState getLicenseState() { + return IncompatibleLicenseLocalStateCcr.this.getLicenseState(); + } + + }); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java new file mode 100644 index 00000000000..cfc30b8dfac --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/LocalStateCcr.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; + +import java.nio.file.Path; + +public class LocalStateCcr extends LocalStateCompositeXPackPlugin { + + public LocalStateCcr(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new Ccr(settings, new CcrLicenseChecker(() -> true)) { + + @Override + protected XPackLicenseState getLicenseState() { + return LocalStateCcr.this.getLicenseState(); + } + + }); + } + +} + diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java new file mode 100644 index 00000000000..07fc44072cb --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/ShardChangesIT.java @@ -0,0 +1,672 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.ccr; + +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.analysis.common.CommonAnalysisPlugin; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.MockHttpTransport; +import org.elasticsearch.test.discovery.TestZenDiscovery; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.ccr.action.CreateAndFollowIndexAction; +import org.elasticsearch.xpack.ccr.action.FollowIndexAction; +import org.elasticsearch.xpack.ccr.action.ShardChangesAction; +import org.elasticsearch.xpack.ccr.action.ShardFollowNodeTask; +import org.elasticsearch.xpack.ccr.action.ShardFollowTask; +import org.elasticsearch.xpack.ccr.action.UnfollowIndexAction; +import org.elasticsearch.xpack.core.XPackSettings; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, transportClientRatio = 0) +public class ShardChangesIT extends ESIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + Settings.Builder newSettings = Settings.builder(); + newSettings.put(super.nodeSettings(nodeOrdinal)); + newSettings.put(XPackSettings.SECURITY_ENABLED.getKey(), false); + newSettings.put(XPackSettings.MONITORING_ENABLED.getKey(), false); + 
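+        // (descriptive note: the remaining X-Pack features are switched off below as well, so the
+        // integration-test cluster exercises CCR only)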
+        newSettings.put(XPackSettings.WATCHER_ENABLED.getKey(), false);
+        newSettings.put(XPackSettings.MACHINE_LEARNING_ENABLED.getKey(), false);
+        newSettings.put(XPackSettings.LOGSTASH_ENABLED.getKey(), false);
+        return newSettings.build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> getMockPlugins() {
+        return Arrays.asList(TestSeedPlugin.class, TestZenDiscovery.TestPlugin.class, MockHttpTransport.TestPlugin.class);
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return Arrays.asList(LocalStateCcr.class, CommonAnalysisPlugin.class);
+    }
+
+    @Override
+    protected boolean ignoreExternalCluster() {
+        return true;
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
+        return nodePlugins();
+    }
+
+    // this emulates what the CCR persistent task will do for pulling
+    public void testGetOperationsBasedOnGlobalSequenceId() throws Exception {
+        client().admin().indices().prepareCreate("index")
+                .setSettings(Settings.builder().put("index.number_of_shards", 1))
+                .get();
+
+        client().prepareIndex("index", "doc", "1").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("index", "doc", "2").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("index", "doc", "3").setSource("{}", XContentType.JSON).get();
+
+        ShardStats shardStats = client().admin().indices().prepareStats("index").get().getIndex("index").getShards()[0];
+        long globalCheckPoint = shardStats.getSeqNoStats().getGlobalCheckpoint();
+        assertThat(globalCheckPoint, equalTo(2L));
+
+        ShardChangesAction.Request request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId());
+        request.setFromSeqNo(0L);
+        request.setMaxOperationCount(3);
+        ShardChangesAction.Response response = client().execute(ShardChangesAction.INSTANCE, request).get();
+        assertThat(response.getOperations().length, equalTo(3));
+        Translog.Index operation = (Translog.Index) response.getOperations()[0];
+        assertThat(operation.seqNo(), equalTo(0L));
+        assertThat(operation.id(), equalTo("1"));
+
+        operation = (Translog.Index) response.getOperations()[1];
+        assertThat(operation.seqNo(), equalTo(1L));
+        assertThat(operation.id(), equalTo("2"));
+
+        operation = (Translog.Index) response.getOperations()[2];
+        assertThat(operation.seqNo(), equalTo(2L));
+        assertThat(operation.id(), equalTo("3"));
+
+        client().prepareIndex("index", "doc", "3").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("index", "doc", "4").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("index", "doc", "5").setSource("{}", XContentType.JSON).get();
+
+        shardStats = client().admin().indices().prepareStats("index").get().getIndex("index").getShards()[0];
+        globalCheckPoint = shardStats.getSeqNoStats().getGlobalCheckpoint();
+        assertThat(globalCheckPoint, equalTo(5L));
+
+        request = new ShardChangesAction.Request(shardStats.getShardRouting().shardId());
+        request.setFromSeqNo(3L);
+        request.setMaxOperationCount(3);
+        response = client().execute(ShardChangesAction.INSTANCE, request).get();
+        assertThat(response.getOperations().length, equalTo(3));
+        operation = (Translog.Index) response.getOperations()[0];
+        assertThat(operation.seqNo(), equalTo(3L));
+        assertThat(operation.id(), equalTo("3"));
+
+        operation = (Translog.Index) response.getOperations()[1];
+        assertThat(operation.seqNo(), equalTo(4L));
+        assertThat(operation.id(), equalTo("4"));
+
+        operation = (Translog.Index) response.getOperations()[2];
+        assertThat(operation.seqNo(), equalTo(5L));
+        assertThat(operation.id(), equalTo("5"));
+    }
+
+    public void testFollowIndex() throws Exception {
+        final int numberOfPrimaryShards = randomIntBetween(1, 3);
+        final String leaderIndexSettings = getIndexSettings(numberOfPrimaryShards, between(0, 1),
+                singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
+        assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
+        ensureYellow("index1");
+
+        final FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2");
+        final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest);
+        client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get();
+
+        final int firstBatchNumDocs = randomIntBetween(2, 64);
+        logger.info("Indexing [{}] docs as first batch", firstBatchNumDocs);
+        for (int i = 0; i < firstBatchNumDocs; i++) {
+            final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
+            client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
+        }
+
+        final Map<ShardId, Long> firstBatchNumDocsPerShard = new HashMap<>();
+        final ShardStats[] firstBatchShardStats = client().admin().indices().prepareStats("index1").get().getIndex("index1").getShards();
+        for (final ShardStats shardStats : firstBatchShardStats) {
+            if (shardStats.getShardRouting().primary()) {
+                long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1;
+                firstBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value);
+            }
+        }
+
+        assertBusy(assertTask(numberOfPrimaryShards, firstBatchNumDocsPerShard));
+
+        for (int i = 0; i < firstBatchNumDocs; i++) {
+            assertBusy(assertExpectedDocumentRunnable(i));
+        }
+
+        unfollowIndex("index2");
+        client().execute(FollowIndexAction.INSTANCE, followRequest).get();
+        final int secondBatchNumDocs = randomIntBetween(2, 64);
+        logger.info("Indexing [{}] docs as second batch", secondBatchNumDocs);
+        for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
+            final String source = String.format(Locale.ROOT, "{\"f\":%d}", i);
+            client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get();
+        }
+
+        final Map<ShardId, Long> secondBatchNumDocsPerShard = new HashMap<>();
+        final ShardStats[] secondBatchShardStats = client().admin().indices().prepareStats("index1").get().getIndex("index1").getShards();
+        for (final ShardStats shardStats : secondBatchShardStats) {
+            if (shardStats.getShardRouting().primary()) {
+                final long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1;
+                secondBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value);
+            }
+        }
+
+        assertBusy(assertTask(numberOfPrimaryShards, secondBatchNumDocsPerShard));
+
+        for (int i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) {
+            assertBusy(assertExpectedDocumentRunnable(i));
+        }
+        unfollowIndex("index2");
+    }
+
+    public void testSyncMappings() throws Exception {
+        final String leaderIndexSettings = getIndexSettings(2, between(0, 1),
+                singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
+        assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
+        ensureYellow("index1");
+
+        final FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2");
+        final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest);
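+        // descriptive note: this single call creates the follower index and starts its shard-follow tasks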
client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get(); + + final long firstBatchNumDocs = randomIntBetween(2, 64); + for (long i = 0; i < firstBatchNumDocs; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + client().prepareIndex("index1", "doc", Long.toString(i)).setSource(source, XContentType.JSON).get(); + } + + assertBusy(() -> assertThat(client().prepareSearch("index2").get().getHits().totalHits, equalTo(firstBatchNumDocs))); + MappingMetaData mappingMetaData = client().admin().indices().prepareGetMappings("index2").get().getMappings() + .get("index2").get("doc"); + assertThat(XContentMapValues.extractValue("properties.f.type", mappingMetaData.sourceAsMap()), equalTo("integer")); + assertThat(XContentMapValues.extractValue("properties.k", mappingMetaData.sourceAsMap()), nullValue()); + + final int secondBatchNumDocs = randomIntBetween(2, 64); + for (long i = firstBatchNumDocs; i < firstBatchNumDocs + secondBatchNumDocs; i++) { + final String source = String.format(Locale.ROOT, "{\"k\":%d}", i); + client().prepareIndex("index1", "doc", Long.toString(i)).setSource(source, XContentType.JSON).get(); + } + + assertBusy(() -> assertThat(client().prepareSearch("index2").get().getHits().totalHits, + equalTo(firstBatchNumDocs + secondBatchNumDocs))); + mappingMetaData = client().admin().indices().prepareGetMappings("index2").get().getMappings() + .get("index2").get("doc"); + assertThat(XContentMapValues.extractValue("properties.f.type", mappingMetaData.sourceAsMap()), equalTo("integer")); + assertThat(XContentMapValues.extractValue("properties.k.type", mappingMetaData.sourceAsMap()), equalTo("long")); + unfollowIndex("index2"); + } + + public void testFollowIndex_backlog() throws Exception { + String leaderIndexSettings = getIndexSettings(between(1, 5), between(0, 1), + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + BulkProcessor.Listener listener = new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) {} + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {} + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) {} + }; + BulkProcessor bulkProcessor = BulkProcessor.builder(client(), listener) + .setBulkActions(100) + .setConcurrentRequests(4) + .build(); + AtomicBoolean run = new AtomicBoolean(true); + Thread thread = new Thread(() -> { + int counter = 0; + while (run.get()) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", counter++); + IndexRequest indexRequest = new IndexRequest("index1", "doc") + .source(source, XContentType.JSON) + .timeout(TimeValue.timeValueSeconds(1)); + bulkProcessor.add(indexRequest); + } + }); + thread.start(); + + // Waiting for some document being index before following the index: + int maxReadSize = randomIntBetween(128, 2048); + long numDocsIndexed = Math.min(3000 * 2, randomLongBetween(maxReadSize, maxReadSize * 10)); + atLeastDocsIndexed("index1", numDocsIndexed / 3); + + final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", maxReadSize, + randomIntBetween(2, 10), Long.MAX_VALUE, randomIntBetween(2, 10), + randomIntBetween(1024, 10240), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + CreateAndFollowIndexAction.Request createAndFollowRequest = 
new CreateAndFollowIndexAction.Request(followRequest); + client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get(); + + atLeastDocsIndexed("index1", numDocsIndexed); + run.set(false); + thread.join(); + assertThat(bulkProcessor.awaitClose(1L, TimeUnit.MINUTES), is(true)); + + assertSameDocCount("index1", "index2"); + unfollowIndex("index2"); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33337") + public void testFollowIndexAndCloseNode() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + String leaderIndexSettings = getIndexSettings(3, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + + String followerIndexSettings = getIndexSettings(3, 1, singletonMap(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index2").setSource(followerIndexSettings, XContentType.JSON)); + ensureGreen("index1", "index2"); + + AtomicBoolean run = new AtomicBoolean(true); + Thread thread = new Thread(() -> { + int counter = 0; + while (run.get()) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", counter++); + try { + client().prepareIndex("index1", "doc") + .setSource(source, XContentType.JSON) + .setTimeout(TimeValue.timeValueSeconds(1)) + .get(); + } catch (Exception e) { + logger.error("Error while indexing into leader index", e); + } + } + }); + thread.start(); + + final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", randomIntBetween(32, 2048), + randomIntBetween(2, 10), Long.MAX_VALUE, randomIntBetween(2, 10), + ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + client().execute(FollowIndexAction.INSTANCE, followRequest).get(); + + long maxNumDocsReplicated = Math.min(1000, randomLongBetween(followRequest.getMaxBatchOperationCount(), + followRequest.getMaxBatchOperationCount() * 10)); + long minNumDocsReplicated = maxNumDocsReplicated / 3L; + logger.info("waiting for at least [{}] documents to be indexed and then stop a random data node", minNumDocsReplicated); + atLeastDocsIndexed("index2", minNumDocsReplicated); + internalCluster().stopRandomNonMasterNode(); + logger.info("waiting for at least [{}] documents to be indexed", maxNumDocsReplicated); + atLeastDocsIndexed("index2", maxNumDocsReplicated); + run.set(false); + thread.join(); + + assertSameDocCount("index1", "index2"); + unfollowIndex("index2"); + } + + public void testFollowIndexWithNestedField() throws Exception { + final String leaderIndexSettings = + getIndexSettingsWithNestedMapping(1, between(0, 1), singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + + final String followerIndexSettings = + getIndexSettingsWithNestedMapping(1, between(0, 1), singletonMap(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index2").setSource(followerIndexSettings, XContentType.JSON)); + + internalCluster().ensureAtLeastNumDataNodes(2); + ensureGreen("index1", "index2"); + + final FollowIndexAction.Request followRequest = createFollowRequest("index1", "index2"); + client().execute(FollowIndexAction.INSTANCE, followRequest).get(); + + final int numDocs = 
randomIntBetween(2, 64); + for (int i = 0; i < numDocs; i++) { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + builder.field("field", "value"); + builder.startArray("objects"); + { + builder.startObject(); + builder.field("field", i); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(builder).get(); + } + } + + for (int i = 0; i < numDocs; i++) { + int value = i; + assertBusy(() -> { + final GetResponse getResponse = client().prepareGet("index2", "doc", Integer.toString(value)).get(); + assertTrue(getResponse.isExists()); + assertTrue((getResponse.getSource().containsKey("field"))); + assertThat(XContentMapValues.extractValue("objects.field", getResponse.getSource()), + equalTo(Collections.singletonList(value))); + }); + } + unfollowIndex("index2"); + } + + public void testUnfollowNonExistingIndex() { + UnfollowIndexAction.Request unfollowRequest = new UnfollowIndexAction.Request(); + unfollowRequest.setFollowIndex("non-existing-index"); + expectThrows(IllegalArgumentException.class, () -> client().execute(UnfollowIndexAction.INSTANCE, unfollowRequest).actionGet()); + } + + public void testFollowNonExistentIndex() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test-leader").get()); + assertAcked(client().admin().indices().prepareCreate("test-follower").get()); + // Leader index does not exist. + FollowIndexAction.Request followRequest1 = createFollowRequest("non-existent-leader", "test-follower"); + expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest1).actionGet()); + // Follower index does not exist. + FollowIndexAction.Request followRequest2 = createFollowRequest("test-leader", "non-existent-follower"); + expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest2).actionGet()); + // Both indices do not exist. + FollowIndexAction.Request followRequest3 = createFollowRequest("non-existent-leader", "non-existent-follower"); + expectThrows(IllegalArgumentException.class, () -> client().execute(FollowIndexAction.INSTANCE, followRequest3).actionGet()); + } + + @TestLogging("_root:DEBUG") + public void testValidateFollowingIndexSettings() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test-leader") + .setSettings(Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true))); + // TODO: indexing should be optional but the current mapping logic requires it for now. + client().prepareIndex("test-leader", "doc", "id").setSource("{\"f\": \"v\"}", XContentType.JSON).get(); + assertAcked(client().admin().indices().prepareCreate("test-follower").get()); + IllegalArgumentException followError = expectThrows(IllegalArgumentException.class, () -> client().execute( + FollowIndexAction.INSTANCE, createFollowRequest("test-leader", "test-follower")).actionGet()); + assertThat(followError.getMessage(), equalTo("the following index [test-follower] is not ready to follow;" + + " the setting [index.xpack.ccr.following_index] must be enabled.")); + // updating the `following_index` setting on an open index must not be allowed.
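+ // The `following_index` setting is not dynamic, so the only supported way to enable it on an existing follower is to close the index, update the setting, and reopen it, as the assertions below verify.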
+ IllegalArgumentException updateError = expectThrows(IllegalArgumentException.class, () -> { + client().admin().indices().prepareUpdateSettings("test-follower") + .setSettings(Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)).get(); + }); + assertThat(updateError.getMessage(), containsString("Can't update non dynamic settings " + + "[[index.xpack.ccr.following_index]] for open indices [[test-follower/")); + assertAcked(client().admin().indices().prepareClose("test-follower")); + assertAcked(client().admin().indices().prepareUpdateSettings("test-follower") + .setSettings(Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true))); + assertAcked(client().admin().indices().prepareOpen("test-follower")); + assertAcked(client().execute(FollowIndexAction.INSTANCE, + createFollowRequest("test-leader", "test-follower")).actionGet()); + unfollowIndex("test-follower"); + } + + public void testFollowIndex_lowMaxTranslogBytes() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, between(0, 1), + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureYellow("index1"); + + final int numDocs = 1024; + logger.info("Indexing [{}] docs", numDocs); + for (int i = 0; i < numDocs; i++) { + final String source = String.format(Locale.ROOT, "{\"f\":%d}", i); + client().prepareIndex("index1", "doc", Integer.toString(i)).setSource(source, XContentType.JSON).get(); + } + + final FollowIndexAction.Request followRequest = new FollowIndexAction.Request("index1", "index2", 1024, 1, 1024L, + 1, 10240, TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(10)); + final CreateAndFollowIndexAction.Request createAndFollowRequest = new CreateAndFollowIndexAction.Request(followRequest); + client().execute(CreateAndFollowIndexAction.INSTANCE, createAndFollowRequest).get(); + + final Map<ShardId, Long> firstBatchNumDocsPerShard = new HashMap<>(); + final ShardStats[] firstBatchShardStats = client().admin().indices().prepareStats("index1").get().getIndex("index1").getShards(); + for (final ShardStats shardStats : firstBatchShardStats) { + if (shardStats.getShardRouting().primary()) { + long value = shardStats.getStats().getIndexing().getTotal().getIndexCount() - 1; + firstBatchNumDocsPerShard.put(shardStats.getShardRouting().shardId(), value); + } + } + + assertBusy(assertTask(1, firstBatchNumDocsPerShard)); + for (int i = 0; i < numDocs; i++) { + assertBusy(assertExpectedDocumentRunnable(i)); + } + unfollowIndex("index2"); + } + + private CheckedRunnable<Exception> assertTask(final int numberOfPrimaryShards, final Map<ShardId, Long> numDocsPerShard) { + return () -> { + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + final PersistentTasksCustomMetaData taskMetadata = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + + ListTasksRequest listTasksRequest = new ListTasksRequest(); + listTasksRequest.setDetailed(true); + listTasksRequest.setActions(ShardFollowTask.NAME + "[c]"); + ListTasksResponse listTasksResponse = client().admin().cluster().listTasks(listTasksRequest).actionGet(); + assertThat(listTasksResponse.getNodeFailures().size(), equalTo(0)); + assertThat(listTasksResponse.getTaskFailures().size(), equalTo(0)); + + List<TaskInfo> taskInfos = listTasksResponse.getTasks(); + assertThat(taskInfos.size(), equalTo(numberOfPrimaryShards)); + Collection<PersistentTasksCustomMetaData.PersistentTask<?>> shardFollowTasks = 
taskMetadata.findTasks(ShardFollowTask.NAME, Objects::nonNull); + for (PersistentTasksCustomMetaData.PersistentTask<?> shardFollowTask : shardFollowTasks) { + final ShardFollowTask shardFollowTaskParams = (ShardFollowTask) shardFollowTask.getParams(); + TaskInfo taskInfo = null; + String expectedId = "id=" + shardFollowTask.getId(); + for (TaskInfo info : taskInfos) { + if (expectedId.equals(info.getDescription())) { + taskInfo = info; + break; + } + } + assertThat(taskInfo, notNullValue()); + ShardFollowNodeTask.Status status = (ShardFollowNodeTask.Status) taskInfo.getStatus(); + assertThat(status, notNullValue()); + assertThat("incorrect global checkpoint " + shardFollowTaskParams, + status.followerGlobalCheckpoint(), + equalTo(numDocsPerShard.get(shardFollowTaskParams.getLeaderShardId()))); + } + }; + } + + private void unfollowIndex(String index) throws Exception { + final UnfollowIndexAction.Request unfollowRequest = new UnfollowIndexAction.Request(); + unfollowRequest.setFollowIndex(index); + client().execute(UnfollowIndexAction.INSTANCE, unfollowRequest).get(); + assertBusy(() -> { + final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + final PersistentTasksCustomMetaData tasks = clusterState.getMetaData().custom(PersistentTasksCustomMetaData.TYPE); + assertThat(tasks.tasks().size(), equalTo(0)); + + ListTasksRequest listTasksRequest = new ListTasksRequest(); + listTasksRequest.setDetailed(true); + ListTasksResponse listTasksResponse = client().admin().cluster().listTasks(listTasksRequest).get(); + int numNodeTasks = 0; + for (TaskInfo taskInfo : listTasksResponse.getTasks()) { + if (taskInfo.getAction().startsWith(ListTasksAction.NAME) == false) { + numNodeTasks++; + } + } + assertThat(numNodeTasks, equalTo(0)); + }, 30, TimeUnit.SECONDS); + } + + private CheckedRunnable<Exception> assertExpectedDocumentRunnable(final int value) { + return () -> { + final GetResponse getResponse = client().prepareGet("index2", "doc", Integer.toString(value)).get(); + assertTrue("Doc with id [" + value + "] is missing", getResponse.isExists()); + assertTrue((getResponse.getSource().containsKey("f"))); + assertThat(getResponse.getSource().get("f"), equalTo(value)); + }; + } + + private String getIndexSettings(final int numberOfShards, final int numberOfReplicas, + final Map<String, String> additionalIndexSettings) throws IOException { + final String settings; + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("settings"); + { + builder.field("index.number_of_shards", numberOfShards); + builder.field("index.number_of_replicas", numberOfReplicas); + for (final Map.Entry<String, String> additionalSetting : additionalIndexSettings.entrySet()) { + builder.field(additionalSetting.getKey(), additionalSetting.getValue()); + } + } + builder.endObject(); + builder.startObject("mappings"); + { + builder.startObject("doc"); + { + builder.startObject("properties"); + { + builder.startObject("f"); + { + builder.field("type", "integer"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + settings = BytesReference.bytes(builder).utf8ToString(); + } + return settings; + } + + private String getIndexSettingsWithNestedMapping(final int numberOfShards, final int numberOfReplicas, + final Map<String, String> additionalIndexSettings) throws IOException { + final String settings; + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.startObject("settings"); + { + 
builder.field("index.number_of_shards", numberOfShards); + builder.field("index.number_of_replicas", numberOfReplicas); + for (final Map.Entry additionalSetting : additionalIndexSettings.entrySet()) { + builder.field(additionalSetting.getKey(), additionalSetting.getValue()); + } + } + builder.endObject(); + builder.startObject("mappings"); + { + builder.startObject("doc"); + { + builder.startObject("properties"); + { + builder.startObject("objects"); + { + builder.field("type", "nested"); + builder.startObject("properties"); + { + builder.startObject("field"); + { + builder.field("type", "long"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + builder.startObject("field"); + { + builder.field("type", "keyword"); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + settings = BytesReference.bytes(builder).utf8ToString(); + } + return settings; + } + + private void atLeastDocsIndexed(String index, long numDocsReplicated) throws InterruptedException { + logger.info("waiting for at least [{}] documents to be indexed into index [{}]", numDocsReplicated, index); + awaitBusy(() -> { + refresh(index); + SearchRequest request = new SearchRequest(index); + request.source(new SearchSourceBuilder().size(0)); + SearchResponse response = client().search(request).actionGet(); + return response.getHits().getTotalHits() >= numDocsReplicated; + }, 60, TimeUnit.SECONDS); + } + + private void assertSameDocCount(String index1, String index2) throws Exception { + refresh(index1); + SearchRequest request1 = new SearchRequest(index1); + request1.source(new SearchSourceBuilder().size(0)); + SearchResponse response1 = client().search(request1).actionGet(); + assertBusy(() -> { + refresh(index2); + SearchRequest request2 = new SearchRequest(index2); + request2.source(new SearchSourceBuilder().size(0)); + SearchResponse response2 = client().search(request2).actionGet(); + assertThat(response2.getHits().getTotalHits(), equalTo(response1.getHits().getTotalHits())); + }, 60, TimeUnit.SECONDS); + } + + public static FollowIndexAction.Request createFollowRequest(String leaderIndex, String followIndex) { + return new FollowIndexAction.Request(leaderIndex, followIndex, ShardFollowNodeTask.DEFAULT_MAX_BATCH_OPERATION_COUNT, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_READ_BATCHES, ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, + ShardFollowNodeTask.DEFAULT_MAX_CONCURRENT_WRITE_BATCHES, ShardFollowNodeTask.DEFAULT_MAX_WRITE_BUFFER_SIZE, + TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10)); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java new file mode 100644 index 00000000000..c68d1849965 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexRequestTests.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class CreateAndFollowIndexRequestTests extends AbstractStreamableTestCase<CreateAndFollowIndexAction.Request> { + + @Override + protected CreateAndFollowIndexAction.Request createBlankInstance() { + return new CreateAndFollowIndexAction.Request(); + } + + @Override + protected CreateAndFollowIndexAction.Request createTestInstance() { + return new CreateAndFollowIndexAction.Request(FollowIndexRequestTests.createTestRequest()); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java new file mode 100644 index 00000000000..11a518ef067 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/CreateAndFollowIndexResponseTests.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class CreateAndFollowIndexResponseTests extends AbstractStreamableTestCase<CreateAndFollowIndexAction.Response> { + + @Override + protected CreateAndFollowIndexAction.Response createBlankInstance() { + return new CreateAndFollowIndexAction.Response(); + } + + @Override + protected CreateAndFollowIndexAction.Response createTestInstance() { + return new CreateAndFollowIndexAction.Response(randomBoolean(), randomBoolean(), randomBoolean()); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java new file mode 100644 index 00000000000..5b52700f557 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexActionTests.java @@ -0,0 +1,170 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexMetaData.State; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.ccr.ShardChangesIT; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class FollowIndexActionTests extends ESTestCase { + + public void testValidation() throws IOException { + FollowIndexAction.Request request = ShardChangesIT.createFollowRequest("index1", "index2"); + { + // should fail, because leader index does not exist + Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, null, null, null)); + assertThat(e.getMessage(), equalTo("leader index [index1] does not exist")); + } + { + // should fail, because follow index does not exist + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY); + Exception e = expectThrows(IllegalArgumentException.class, () -> FollowIndexAction.validate(request, leaderIMD, null, null)); + assertThat(e.getMessage(), equalTo("follow index [index2] does not exist")); + } + { + // should fail because leader index does not have soft deletes enabled + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.EMPTY); + IndexMetaData followIMD = createIMD("index2", 5, Settings.EMPTY); + Exception e = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + assertThat(e.getMessage(), equalTo("leader index [index1] does not have soft deletes enabled")); + } + { + // should fail because the number of primary shards of the leader and follower index is not equal + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + IndexMetaData followIMD = createIMD("index2", 4, Settings.EMPTY); + Exception e = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + assertThat(e.getMessage(), + equalTo("leader index primary shards [5] does not match with the number of shards of the follow index [4]")); + } + { + // should fail, because leader index is closed + IndexMetaData leaderIMD = createIMD("index1", State.CLOSE, "{}", 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, "{}", 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + Exception e = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + assertThat(e.getMessage(), equalTo("leader and follow index must be open")); + } + { + // should fail, because leader has a field with the same name mapped as keyword and follower as text + IndexMetaData leaderIMD = createIMD("index1", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"keyword\"}}}", 5, + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, "{\"properties\": {\"field\": {\"type\": \"text\"}}}", 5, + 
Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); + mapperService.updateMapping(null, followIMD); + Exception e = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); + assertThat(e.getMessage(), equalTo("mapper [field] of different type, current_type [text], merged_type [keyword]")); + } + { + // should fail because non-whitelisted settings are not the same between the leader and follower index + String mapping = "{\"properties\": {\"field\": {\"type\": \"text\", \"analyzer\": \"my_analyzer\"}}}"; + IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "whitespace").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + Exception e = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, null)); + assertThat(e.getMessage(), equalTo("the leader and follower index settings must be identical")); + } + { + // should fail because the following index does not have the following_index setting enabled + IndexMetaData leaderIMD = createIMD("index1", 5, + Settings.builder().put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + Settings followingIndexSettings = randomBoolean() ? 
Settings.EMPTY : + Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), false).build(); + IndexMetaData followIMD = createIMD("index2", 5, followingIndexSettings); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), + followingIndexSettings, "index2"); + mapperService.updateMapping(null, followIMD); + IllegalArgumentException error = expectThrows(IllegalArgumentException.class, + () -> FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService)); + assertThat(error.getMessage(), equalTo("the following index [index2] is not ready to follow; " + + "the setting [index.xpack.ccr.following_index] must be enabled.")); + } + { + // should succeed + IndexMetaData leaderIMD = createIMD("index1", 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true").build()); + IndexMetaData followIMD = createIMD("index2", 5, Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build()); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), Settings.EMPTY, "index2"); + mapperService.updateMapping(null, followIMD); + FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + } + { + // should succeed, index settings are identical + String mapping = "{\"properties\": {\"field\": {\"type\": \"text\", \"analyzer\": \"my_analyzer\"}}}"; + IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), + followIMD.getSettings(), "index2"); + mapperService.updateMapping(null, followIMD); + FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + } + { + // should succeed despite whitelisted settings being different + String mapping = "{\"properties\": {\"field\": {\"type\": \"text\", \"analyzer\": \"my_analyzer\"}}}"; + IndexMetaData leaderIMD = createIMD("index1", State.OPEN, mapping, 5, Settings.builder() + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true") + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + IndexMetaData followIMD = createIMD("index2", State.OPEN, mapping, 5, Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "10s") + .put("index.analysis.analyzer.my_analyzer.type", "custom") + .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard").build()); + MapperService mapperService = MapperTestUtils.newMapperService(xContentRegistry(), createTempDir(), + followIMD.getSettings(), "index2"); + mapperService.updateMapping(null, followIMD); + FollowIndexAction.validate(request, leaderIMD, followIMD, mapperService); + } + } + + private static IndexMetaData createIMD(String index, int numberOfShards, Settings 
settings) throws IOException { + return createIMD(index, State.OPEN, "{\"properties\": {}}", numberOfShards, settings); + } + + private static IndexMetaData createIMD(String index, State state, String mapping, int numberOfShards, + Settings settings) throws IOException { + return IndexMetaData.builder(index) + .settings(settings(Version.CURRENT).put(settings)) + .numberOfShards(numberOfShards) + .state(state) + .numberOfReplicas(0) + .setRoutingNumShards(numberOfShards) + .putMapping("_doc", mapping) + .build(); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java new file mode 100644 index 00000000000..7202f7202c6 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/FollowIndexRequestTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +import java.io.IOException; + +public class FollowIndexRequestTests extends AbstractStreamableXContentTestCase<FollowIndexAction.Request> { + + @Override + protected FollowIndexAction.Request createBlankInstance() { + return new FollowIndexAction.Request(); + } + + @Override + protected FollowIndexAction.Request createTestInstance() { + return createTestRequest(); + } + + @Override + protected FollowIndexAction.Request doParseInstance(XContentParser parser) throws IOException { + return FollowIndexAction.Request.fromXContent(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + static FollowIndexAction.Request createTestRequest() { + return new FollowIndexAction.Request(randomAlphaOfLength(4), randomAlphaOfLength(4), randomIntBetween(1, Integer.MAX_VALUE), + randomIntBetween(1, Integer.MAX_VALUE), randomNonNegativeLong(), randomIntBetween(1, Integer.MAX_VALUE), + randomIntBetween(1, Integer.MAX_VALUE), TimeValue.timeValueMillis(500), TimeValue.timeValueMillis(500)); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java new file mode 100644 index 00000000000..430e9cb48b1 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesActionTests.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardNotStartedException; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.mockito.Mockito; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class ShardChangesActionTests extends ESSingleNodeTestCase { + + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + public void testGetOperations() throws Exception { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + final IndexService indexService = createIndex("index", settings); + + final int numWrites = randomIntBetween(2, 4096); + for (int i = 0; i < numWrites; i++) { + client().prepareIndex("index", "doc", Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + } + + // A number of times, get operations within a range that exists: + int iters = randomIntBetween(8, 32); + IndexShard indexShard = indexService.getShard(0); + for (int iter = 0; iter < iters; iter++) { + int min = randomIntBetween(0, numWrites - 1); + int max = randomIntBetween(min, numWrites - 1); + int size = max - min + 1; + final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, + indexShard.getGlobalCheckpoint(), min, size, Long.MAX_VALUE); + final List<Long> seenSeqNos = Arrays.stream(operations).map(Translog.Operation::seqNo).collect(Collectors.toList()); + final List<Long> expectedSeqNos = LongStream.rangeClosed(min, max).boxed().collect(Collectors.toList()); + assertThat(seenSeqNos, equalTo(expectedSeqNos)); + } + + // get operations for a range where no operations exist: + Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + numWrites, numWrites + 1, Long.MAX_VALUE); + assertThat(operations.length, equalTo(0)); + + // get operations for a range where some operations do not exist: + operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + numWrites - 10, numWrites + 10, Long.MAX_VALUE); + assertThat(operations.length, equalTo(10)); + } + + public void testGetOperationsWhenShardNotStarted() throws Exception { + IndexShard indexShard = Mockito.mock(IndexShard.class); + + ShardRouting shardRouting = TestShardRouting.newShardRouting("index", 0, "_node_id", true, ShardRoutingState.INITIALIZING); + Mockito.when(indexShard.routingEntry()).thenReturn(shardRouting); + expectThrows(IndexShardNotStartedException.class, () -> ShardChangesAction.getOperations(indexShard, + indexShard.getGlobalCheckpoint(), 0, 
1, Long.MAX_VALUE)); + } + + public void testGetOperationsExceedByteLimit() throws Exception { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + final IndexService indexService = createIndex("index", settings); + + final long numWrites = 32; + for (int i = 0; i < numWrites; i++) { + client().prepareIndex("index", "doc", Integer.toString(i)).setSource("{}", XContentType.JSON).get(); + } + + final IndexShard indexShard = indexService.getShard(0); + final Translog.Operation[] operations = ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), + 0, 12, 256); + assertThat(operations.length, equalTo(12)); + assertThat(operations[0].seqNo(), equalTo(0L)); + assertThat(operations[1].seqNo(), equalTo(1L)); + assertThat(operations[2].seqNo(), equalTo(2L)); + assertThat(operations[3].seqNo(), equalTo(3L)); + assertThat(operations[4].seqNo(), equalTo(4L)); + assertThat(operations[5].seqNo(), equalTo(5L)); + assertThat(operations[6].seqNo(), equalTo(6L)); + assertThat(operations[7].seqNo(), equalTo(7L)); + assertThat(operations[8].seqNo(), equalTo(8L)); + assertThat(operations[9].seqNo(), equalTo(9L)); + assertThat(operations[10].seqNo(), equalTo(10L)); + assertThat(operations[11].seqNo(), equalTo(11L)); + } + + public void testGetOperationsAlwaysReturnAtLeastOneOp() throws Exception { + final Settings settings = Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .build(); + final IndexService indexService = createIndex("index", settings); + + client().prepareIndex("index", "doc", "0").setSource("{}", XContentType.JSON).get(); + + final IndexShard indexShard = indexService.getShard(0); + final Translog.Operation[] operations = + ShardChangesAction.getOperations(indexShard, indexShard.getGlobalCheckpoint(), 0, 1, 0); + assertThat(operations.length, equalTo(1)); + assertThat(operations[0].seqNo(), equalTo(0L)); + } + + public void testIndexNotFound() throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference<Exception> reference = new AtomicReference<>(); + final ShardChangesAction.TransportAction transportAction = node().injector().getInstance(ShardChangesAction.TransportAction.class); + transportAction.execute( + new ShardChangesAction.Request(new ShardId(new Index("non-existent", "uuid"), 0)), + new ActionListener<ShardChangesAction.Response>() { + @Override + public void onResponse(final ShardChangesAction.Response response) { + fail(); + } + + @Override + public void onFailure(final Exception e) { + reference.set(e); + latch.countDown(); + } + }); + latch.await(); + assertNotNull(reference.get()); + assertThat(reference.get(), instanceOf(IndexNotFoundException.class)); + } + + public void testShardNotFound() throws InterruptedException { + final int numberOfShards = randomIntBetween(1, 5); + final IndexService indexService = createIndex("index", Settings.builder().put("index.number_of_shards", numberOfShards).build()); + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference<Exception> reference = new AtomicReference<>(); + final ShardChangesAction.TransportAction transportAction = node().injector().getInstance(ShardChangesAction.TransportAction.class); + transportAction.execute( + new ShardChangesAction.Request(new ShardId(indexService.getMetaData().getIndex(), numberOfShards)), + new ActionListener<ShardChangesAction.Response>() { + @Override + public void onResponse(final ShardChangesAction.Response response) { + fail(); + } + + @Override + public void 
onFailure(final Exception e) { + reference.set(e); + latch.countDown(); + } + }); + latch.await(); + assertNotNull(reference.get()); + assertThat(reference.get(), instanceOf(ShardNotFoundException.class)); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java new file mode 100644 index 00000000000..19585da8851 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesRequestTests.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.AbstractStreamableTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.nullValue; + +public class ShardChangesRequestTests extends AbstractStreamableTestCase<ShardChangesAction.Request> { + + @Override + protected ShardChangesAction.Request createTestInstance() { + ShardChangesAction.Request request = new ShardChangesAction.Request(new ShardId("_index", "_indexUUID", 0)); + request.setMaxOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + request.setFromSeqNo(randomNonNegativeLong()); + return request; + } + + @Override + protected ShardChangesAction.Request createBlankInstance() { + return new ShardChangesAction.Request(); + } + + public void testValidate() { + ShardChangesAction.Request request = new ShardChangesAction.Request(new ShardId("_index", "_indexUUID", 0)); + request.setFromSeqNo(-1); + assertThat(request.validate().getMessage(), containsString("fromSeqNo [-1] cannot be lower than 0")); + + request.setFromSeqNo(0); + request.setMaxOperationCount(-1); + assertThat(request.validate().getMessage(), containsString("maxOperationCount [-1] cannot be lower than 0")); + + request.setMaxOperationCount(8); + assertThat(request.validate(), nullValue()); + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java new file mode 100644 index 00000000000..e9c67097d72 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardChangesResponseTests.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.test.AbstractStreamableTestCase; + +public class ShardChangesResponseTests extends AbstractStreamableTestCase<ShardChangesAction.Response> { + + @Override + protected ShardChangesAction.Response createTestInstance() { + final long mappingVersion = randomNonNegativeLong(); + final long leaderGlobalCheckpoint = randomNonNegativeLong(); + final long leaderMaxSeqNo = randomLongBetween(leaderGlobalCheckpoint, Long.MAX_VALUE); + final int numOps = randomInt(8); + final Translog.Operation[] operations = new Translog.Operation[numOps]; + for (int i = 0; i < numOps; i++) { + operations[i] = new Translog.NoOp(i, 0, "test"); + } + return new ShardChangesAction.Response(mappingVersion, leaderGlobalCheckpoint, leaderMaxSeqNo, operations); + } + + @Override + protected ShardChangesAction.Response createBlankInstance() { + return new ShardChangesAction.Response(); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java new file mode 100644 index 00000000000..f5fe1215a0e --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -0,0 +1,296 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.seqno.LocalCheckpointTracker; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.LongConsumer; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ShardFollowNodeTaskRandomTests extends ESTestCase { + + public void testSingleReaderWriter() throws Exception { + TestRun testRun = createTestRun(randomNonNegativeLong(), randomNonNegativeLong(), randomIntBetween(1, 2048)); + ShardFollowNodeTask task = createShardFollowTask(1, testRun); + startAndAssertAndStopTask(task, testRun); + } + + public void testMultipleReaderWriter() throws Exception { + int concurrency = randomIntBetween(2, 8); + TestRun testRun = createTestRun(0, 0, between(1, 1024)); + ShardFollowNodeTask task = createShardFollowTask(concurrency, testRun); + startAndAssertAndStopTask(task, testRun); + } + + private void startAndAssertAndStopTask(ShardFollowNodeTask task, TestRun testRun) throws Exception { + task.start(testRun.startSeqNo - 1, 
testRun.startSeqNo - 1, testRun.startSeqNo - 1, testRun.startSeqNo - 1); + assertBusy(() -> { + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.leaderGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint)); + assertThat(status.followerGlobalCheckpoint(), equalTo(testRun.finalExpectedGlobalCheckpoint)); + final long numberOfFailedFetches = + testRun.responses.values().stream().flatMap(List::stream).filter(f -> f.exception != null).count(); + assertThat(status.numberOfFailedFetches(), equalTo(numberOfFailedFetches)); + // the failures were able to be retried so fetch failures should have cleared + assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + assertThat(status.mappingVersion(), equalTo(testRun.finalMappingVersion)); + }); + + task.markAsCompleted(); + assertBusy(() -> { + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(0)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + }); + } + + private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testRun) { + AtomicBoolean stopped = new AtomicBoolean(false); + ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), + new ShardId("leader_index", "", 0), testRun.maxOperationCount, concurrency, + ShardFollowNodeTask.DEFAULT_MAX_BATCH_SIZE_IN_BYTES, concurrency, 10240, + TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap()); + + ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName()); + BiConsumer<TimeValue, Runnable> scheduler = (delay, task) -> { + assert delay.millis() < 100 : "The delay should be kept to a minimum, so that this test does not take too long to run"; + threadPool.schedule(delay, ThreadPool.Names.GENERIC, task); + }; + List<Translog.Operation> receivedOperations = Collections.synchronizedList(new ArrayList<>()); + LocalCheckpointTracker tracker = new LocalCheckpointTracker(testRun.startSeqNo - 1, testRun.startSeqNo - 1); + return new ShardFollowNodeTask( + 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { + + private volatile long mappingVersion = 0L; + private final Map<Long, Integer> fromToSlot = new HashMap<>(); + + @Override + protected void innerUpdateMapping(LongConsumer handler, Consumer<Exception> errorHandler) { + handler.accept(mappingVersion); + } + + @Override + protected void innerSendBulkShardOperationsRequest( + List<Translog.Operation> operations, + Consumer<BulkShardOperationsResponse> handler, + Consumer<Exception> errorHandler) { + for (Translog.Operation op : operations) { + tracker.markSeqNoAsCompleted(op.seqNo()); + } + receivedOperations.addAll(operations); + + // Emulate network thread and avoid stack overflow: + final BulkShardOperationsResponse response = new BulkShardOperationsResponse(); + response.setGlobalCheckpoint(tracker.getCheckpoint()); + response.setMaxSeqNo(tracker.getMaxSeqNo()); + threadPool.generic().execute(() -> handler.accept(response)); + } + + @Override + protected void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer<ShardChangesAction.Response> handler, + Consumer<Exception> errorHandler) { + + // Emulate network thread and avoid stack overflow: + Runnable task = () -> { + List<TestResponse> items = testRun.responses.get(from); + if (items != null) { + final TestResponse testResponse; + synchronized (fromToSlot) { + int slot; + if (fromToSlot.get(from) == null) { + slot = fromToSlot.getOrDefault(from, 0); + fromToSlot.put(from, slot); + } else { + slot = fromToSlot.get(from); + } + testResponse = items.get(slot); + fromToSlot.put(from, ++slot); + // if too many invocations occur 
with the same from then an ArrayIndexOutOfBoundsException occurs; that is ok, because then something is wrong. + } + mappingVersion = testResponse.mappingVersion; + if (testResponse.exception != null) { + errorHandler.accept(testResponse.exception); + } else { + handler.accept(testResponse.response); + } + } else { + assert from >= testRun.finalExpectedGlobalCheckpoint; + final long globalCheckpoint = tracker.getCheckpoint(); + final long maxSeqNo = tracker.getMaxSeqNo(); + handler.accept(new ShardChangesAction.Response(0L, globalCheckpoint, maxSeqNo, new Translog.Operation[0])); + } + }; + threadPool.generic().execute(task); + } + + @Override + protected boolean isStopped() { + return stopped.get(); + } + + @Override + public void markAsCompleted() { + stopped.set(true); + tearDown(); + } + + @Override + public void markAsFailed(Exception e) { + stopped.set(true); + tearDown(); + } + + private void tearDown() { + threadPool.shutdown(); + List<Translog.Operation> expectedOperations = testRun.responses.values().stream() + .flatMap(List::stream) + .map(testResponse -> testResponse.response) + .filter(Objects::nonNull) + .flatMap(response -> Arrays.stream(response.getOperations())) + .sorted(Comparator.comparingLong(Translog.Operation::seqNo)) + .collect(Collectors.toList()); + assertThat(receivedOperations.size(), equalTo(expectedOperations.size())); + receivedOperations.sort(Comparator.comparingLong(Translog.Operation::seqNo)); + for (int i = 0; i < receivedOperations.size(); i++) { + Translog.Operation actual = receivedOperations.get(i); + Translog.Operation expected = expectedOperations.get(i); + assertThat(actual, equalTo(expected)); + } + } + }; + } + + private static TestRun createTestRun(long startSeqNo, long startMappingVersion, int maxOperationCount) { + long prevGlobalCheckpoint = startSeqNo; + long mappingVersion = startMappingVersion; + int numResponses = randomIntBetween(16, 256); + Map<Long, List<TestResponse>> responses = new HashMap<>(numResponses); + for (int i = 0; i < numResponses; i++) { + long nextGlobalCheckPoint = prevGlobalCheckpoint + maxOperationCount; + if (sometimes()) { + mappingVersion++; + } + + if (sometimes()) { + List<TestResponse> item = new ArrayList<>(); + // Sometimes add a random retryable error + if (sometimes()) { + Exception error = new UnavailableShardsException(new ShardId("test", "test", 0), ""); + item.add(new TestResponse(error, mappingVersion, null)); + } + List<Translog.Operation> ops = new ArrayList<>(); + for (long seqNo = prevGlobalCheckpoint; seqNo <= nextGlobalCheckPoint; seqNo++) { + String id = UUIDs.randomBase64UUID(); + byte[] source = "{}".getBytes(StandardCharsets.UTF_8); + ops.add(new Translog.Index("doc", id, seqNo, 0, source)); + } + item.add(new TestResponse(null, mappingVersion, + new ShardChangesAction.Response(mappingVersion, nextGlobalCheckPoint, nextGlobalCheckPoint, ops.toArray(EMPTY)))); + responses.put(prevGlobalCheckpoint, item); + } else { + // Simulates a leader shard copy not having all the operations the shard follow task thinks it has by + // splitting up a response into multiple responses AND simulates maxBatchSizeInBytes limit being reached: + long toSeqNo; + for (long fromSeqNo = prevGlobalCheckpoint; fromSeqNo <= nextGlobalCheckPoint; fromSeqNo = toSeqNo + 1) { + toSeqNo = randomLongBetween(fromSeqNo, nextGlobalCheckPoint); + List<TestResponse> item = new ArrayList<>(); + // Sometimes add a random retryable error + if (sometimes()) { + Exception error = new UnavailableShardsException(new ShardId("test", "test", 0), ""); + item.add(new TestResponse(error, mappingVersion, null)); + } + // Sometimes add an empty shard changes response to also 
simulate a leader shard lagging behind + if (sometimes()) { + ShardChangesAction.Response response = + new ShardChangesAction.Response(mappingVersion, prevGlobalCheckpoint, prevGlobalCheckpoint, EMPTY); + item.add(new TestResponse(null, mappingVersion, response)); + } + List<Translog.Operation> ops = new ArrayList<>(); + for (long seqNo = fromSeqNo; seqNo <= toSeqNo; seqNo++) { + String id = UUIDs.randomBase64UUID(); + byte[] source = "{}".getBytes(StandardCharsets.UTF_8); + ops.add(new Translog.Index("doc", id, seqNo, 0, source)); + } + // Report toSeqNo to simulate the maxBatchSizeInBytes limit being met, or the last op to simulate a shard lagging behind: + long localLeaderGCP = randomBoolean() ? ops.get(ops.size() - 1).seqNo() : toSeqNo; + ShardChangesAction.Response response = + new ShardChangesAction.Response(mappingVersion, localLeaderGCP, localLeaderGCP, ops.toArray(EMPTY)); + item.add(new TestResponse(null, mappingVersion, response)); + responses.put(fromSeqNo, Collections.unmodifiableList(item)); + } + } + prevGlobalCheckpoint = nextGlobalCheckPoint + 1; + } + return new TestRun(maxOperationCount, startSeqNo, startMappingVersion, mappingVersion, + prevGlobalCheckpoint - 1, responses); + } + + // Instead of rarely(), which returns true very rarely, especially when not running in nightly mode or when a multiplier has not been set + private static boolean sometimes() { + return randomIntBetween(0, 10) == 5; + } + + private static class TestRun { + + final int maxOperationCount; + final long startSeqNo; + final long startMappingVersion; + + final long finalMappingVersion; + final long finalExpectedGlobalCheckpoint; + final Map<Long, List<TestResponse>> responses; + + private TestRun(int maxOperationCount, long startSeqNo, long startMappingVersion, long finalMappingVersion, + long finalExpectedGlobalCheckpoint, Map<Long, List<TestResponse>> responses) { + this.maxOperationCount = maxOperationCount; + this.startSeqNo = startSeqNo; + this.startMappingVersion = startMappingVersion; + this.finalMappingVersion = finalMappingVersion; + this.finalExpectedGlobalCheckpoint = finalExpectedGlobalCheckpoint; + this.responses = Collections.unmodifiableMap(responses); + } + } + + private static class TestResponse { + + final Exception exception; + final long mappingVersion; + final ShardChangesAction.Response response; + + private TestResponse(Exception exception, long mappingVersion, ShardChangesAction.Response response) { + this.exception = exception; + this.mappingVersion = mappingVersion; + this.response = response; + } + } + + private static final Translog.Operation[] EMPTY = new Translog.Operation[0]; + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java new file mode 100644 index 00000000000..8368a818e00 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskStatusTests.java @@ -0,0 +1,115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ + +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Map; +import java.util.NavigableMap; +import java.util.TreeMap; + +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class ShardFollowNodeTaskStatusTests extends AbstractSerializingTestCase<ShardFollowNodeTask.Status> { + + @Override + protected ShardFollowNodeTask.Status doParseInstance(XContentParser parser) throws IOException { + return ShardFollowNodeTask.Status.fromXContent(parser); + } + + @Override + protected ShardFollowNodeTask.Status createTestInstance() { + // if you change this constructor, reflect the changes in the hand-written assertions below + return new ShardFollowNodeTask.Status( + randomAlphaOfLength(4), + randomInt(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomIntBetween(0, Integer.MAX_VALUE), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomNonNegativeLong(), + randomReadExceptions(), + randomLong()); + } + + @Override + protected void assertEqualInstances(final ShardFollowNodeTask.Status expectedInstance, final ShardFollowNodeTask.Status newInstance) { + assertNotSame(expectedInstance, newInstance); + assertThat(newInstance.leaderIndex(), equalTo(expectedInstance.leaderIndex())); + assertThat(newInstance.getShardId(), equalTo(expectedInstance.getShardId())); + assertThat(newInstance.leaderGlobalCheckpoint(), equalTo(expectedInstance.leaderGlobalCheckpoint())); + assertThat(newInstance.leaderMaxSeqNo(), equalTo(expectedInstance.leaderMaxSeqNo())); + assertThat(newInstance.followerGlobalCheckpoint(), equalTo(expectedInstance.followerGlobalCheckpoint())); + assertThat(newInstance.lastRequestedSeqNo(), equalTo(expectedInstance.lastRequestedSeqNo())); + assertThat(newInstance.numberOfConcurrentReads(), equalTo(expectedInstance.numberOfConcurrentReads())); + assertThat(newInstance.numberOfConcurrentWrites(), equalTo(expectedInstance.numberOfConcurrentWrites())); + assertThat(newInstance.numberOfQueuedWrites(), equalTo(expectedInstance.numberOfQueuedWrites())); + assertThat(newInstance.mappingVersion(), equalTo(expectedInstance.mappingVersion())); + assertThat(newInstance.totalFetchTimeMillis(), equalTo(expectedInstance.totalFetchTimeMillis())); + assertThat(newInstance.numberOfSuccessfulFetches(), equalTo(expectedInstance.numberOfSuccessfulFetches())); + assertThat(newInstance.numberOfFailedFetches(), equalTo(expectedInstance.numberOfFailedFetches())); + assertThat(newInstance.operationsReceived(), equalTo(expectedInstance.operationsReceived())); + assertThat(newInstance.totalTransferredBytes(), equalTo(expectedInstance.totalTransferredBytes())); + assertThat(newInstance.totalIndexTimeMillis(), equalTo(expectedInstance.totalIndexTimeMillis())); + assertThat(newInstance.numberOfSuccessfulBulkOperations(), 
equalTo(expectedInstance.numberOfSuccessfulBulkOperations())); + assertThat(newInstance.numberOfFailedBulkOperations(), equalTo(expectedInstance.numberOfFailedBulkOperations())); + assertThat(newInstance.numberOfOperationsIndexed(), equalTo(expectedInstance.numberOfOperationsIndexed())); + assertThat(newInstance.fetchExceptions().size(), equalTo(expectedInstance.fetchExceptions().size())); + assertThat(newInstance.fetchExceptions().keySet(), equalTo(expectedInstance.fetchExceptions().keySet())); + for (final Map.Entry<Long, ElasticsearchException> entry : newInstance.fetchExceptions().entrySet()) { + // x-content loses the exception + final ElasticsearchException expected = expectedInstance.fetchExceptions().get(entry.getKey()); + assertThat(entry.getValue().getMessage(), containsString(expected.getMessage())); + assertNotNull(entry.getValue().getCause()); + assertThat( + entry.getValue().getCause(), + anyOf(instanceOf(ElasticsearchException.class), instanceOf(IllegalStateException.class))); + assertThat(entry.getValue().getCause().getMessage(), containsString(expected.getCause().getMessage())); + } + assertThat(newInstance.timeSinceLastFetchMillis(), equalTo(expectedInstance.timeSinceLastFetchMillis())); + } + + @Override + protected boolean assertToXContentEquivalence() { + return false; + } + + private NavigableMap<Long, ElasticsearchException> randomReadExceptions() { + final int count = randomIntBetween(0, 16); + final NavigableMap<Long, ElasticsearchException> readExceptions = new TreeMap<>(); + for (int i = 0; i < count; i++) { + readExceptions.put(randomNonNegativeLong(), new ElasticsearchException(new IllegalStateException("index [" + i + "]"))); + } + return readExceptions; + } + + @Override + protected Writeable.Reader<ShardFollowNodeTask.Status> instanceReader() { + return ShardFollowNodeTask.Status::new; + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java new file mode 100644 index 00000000000..4f7c0bf1664 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskTests.java @@ -0,0 +1,826 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; + +import java.net.ConnectException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.LongConsumer; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class ShardFollowNodeTaskTests extends ESTestCase { + + private Exception fatalError; + private List<long[]> shardChangesRequests; + private List<List<Translog.Operation>> bulkShardOperationRequests; + private BiConsumer<TimeValue, Runnable> scheduler = (delay, task) -> task.run(); + + private Consumer<ShardFollowNodeTask.Status> beforeSendShardChangesRequest = status -> {}; + + private AtomicBoolean simulateResponse = new AtomicBoolean(); + + private Queue<Exception> readFailures; + private Queue<Exception> writeFailures; + private Queue<Exception> mappingUpdateFailures; + private Queue<Long> mappingVersions; + private Queue<Long> leaderGlobalCheckpoints; + private Queue<Long> followerGlobalCheckpoints; + private Queue<Long> maxSeqNos; + + public void testCoordinateReads() { + ShardFollowNodeTask task = createShardFollowTask(8, between(8, 20), between(1, 20), Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 3, -1); + task.coordinateReads(); + assertThat(shardChangesRequests, contains(new long[]{0L, 8L})); // treat this as a peek request + shardChangesRequests.clear(); + task.innerHandleReadResponse(0, 5L, generateShardChangesResponse(0, 5L, 0L, 60L)); + assertThat(shardChangesRequests, contains(new long[][]{ + {6L, 8L}, {14L, 8L}, {22L, 8L}, {30L, 8L}, {38L, 8L}, {46L, 8L}, {54L, 7L}} + )); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(7)); + assertThat(status.lastRequestedSeqNo(), equalTo(60L)); + } + + public void testWriteBuffer() { + // Need to set concurrentWrites to 0, otherwise the write buffer gets flushed immediately: + ShardFollowNodeTask task = createShardFollowTask(64, 1, 0, 32, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + shardChangesRequests.clear(); + // Also invokes the coordinateReads() method: + task.innerHandleReadResponse(0L, 63L, generateShardChangesResponse(0, 63, 0L, 128L)); + assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because write buffer is full + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(0)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); +
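+ // with all 64 ops buffered against a write buffer limit of 32 and zero concurrent writes configured, reads and writes are both idle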
assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(128L)); + } + + public void testMaxConcurrentReads() { + ShardFollowNodeTask task = createShardFollowTask(8, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 64, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(8L)); + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.lastRequestedSeqNo(), equalTo(7L)); + } + + public void testTaskCancelled() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 64, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + shardChangesRequests.clear(); + // The call to updateMapping is a noop, so nothing happens. + task.start(128L, 128L, task.getStatus().followerGlobalCheckpoint(), task.getStatus().followerMaxSeqNo()); + task.markAsCompleted(); + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(0)); + } + + public void testTaskCancelledAfterReadLimitHasBeenReached() { + ShardFollowNodeTask task = createShardFollowTask(16, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 31, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(16L)); + + task.markAsCompleted(); + shardChangesRequests.clear(); + // Also invokes the coordinateReads() method: + task.innerHandleReadResponse(0L, 15L, generateShardChangesResponse(0, 15, 0L, 31L)); + assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because task has been cancelled + assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because task has been cancelled + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(0)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(15L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(31L)); + assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); + } + + public void testTaskCancelledAfterWriteBufferLimitHasBeenReached() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, 32, Long.MAX_VALUE); + startTask(task, 64, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + task.markAsCompleted(); + shardChangesRequests.clear(); + // Also invokes the coordinateReads() method: + task.innerHandleReadResponse(0L, 63L, generateShardChangesResponse(0, 63, 0L, 128L)); + assertThat(shardChangesRequests.size(), equalTo(0)); // no more reads, because task has been cancelled + assertThat(bulkShardOperationRequests.size(), equalTo(0)); // no more writes, because task has been cancelled + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(0)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); +
assertThat(status.leaderGlobalCheckpoint(), equalTo(128L)); + assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); + } + + public void testReceiveRetryableError() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + int max = randomIntBetween(1, 10); + for (int i = 0; i < max; i++) { + readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); + } + mappingVersions.add(1L); + leaderGlobalCheckpoints.add(63L); + maxSeqNos.add(63L); + simulateResponse.set(true); + final AtomicLong retryCounter = new AtomicLong(); + // before each retry, we assert the fetch failures; after the last retry, the fetch failure should clear + beforeSendShardChangesRequest = status -> { + assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get())); + if (retryCounter.get() > 0) { + assertThat(status.fetchExceptions().entrySet(), hasSize(1)); + final Map.Entry<Long, ElasticsearchException> entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(entry.getKey(), equalTo(0L)); + assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); + assertNotNull(entry.getValue().getCause()); + assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); + final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause(); + assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); + assertThat(cause.getShardId().getId(), equalTo(0)); + } + retryCounter.incrementAndGet(); + }; + task.coordinateReads(); + + // Number of requests is equal to initial request + retried attempts + assertThat(shardChangesRequests.size(), equalTo(max + 1)); + for (long[] shardChangesRequest : shardChangesRequests) { + assertThat(shardChangesRequest[0], equalTo(0L)); + assertThat(shardChangesRequest[1], equalTo(64L)); + } + + assertFalse("task is not stopped", task.isStopped()); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.numberOfFailedFetches(), equalTo((long) max)); + assertThat(status.numberOfSuccessfulFetches(), equalTo(1L)); + // the fetch failure has cleared + assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + } + + public void testReceiveRetryableErrorRetriedTooManyTimes() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + int max = randomIntBetween(11, 32); + for (int i = 0; i < max; i++) { + readFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); + } + final AtomicLong retryCounter = new AtomicLong(); + // before each retry, we assert the fetch failures; after the last retry, the fetch failure should persist + beforeSendShardChangesRequest = status -> { + assertThat(status.numberOfFailedFetches(), equalTo(retryCounter.get())); + if (retryCounter.get() > 0) { + assertThat(status.fetchExceptions().entrySet(), hasSize(1)); + final Map.Entry<Long, ElasticsearchException> entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(entry.getKey(), equalTo(0L)); + assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); + assertNotNull(entry.getValue().getCause()); + assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); + final ShardNotFoundException cause =
(ShardNotFoundException) entry.getValue().getCause(); + assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); + assertThat(cause.getShardId().getId(), equalTo(0)); + } + retryCounter.incrementAndGet(); + }; + task.coordinateReads(); + + assertThat(shardChangesRequests.size(), equalTo(11)); + for (long[] shardChangesRequest : shardChangesRequests) { + assertThat(shardChangesRequest[0], equalTo(0L)); + assertThat(shardChangesRequest[1], equalTo(64L)); + } + + assertTrue("task is stopped", task.isStopped()); + assertThat(fatalError, notNullValue()); + assertThat(fatalError.getMessage(), containsString("retrying failed [")); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.numberOfFailedFetches(), equalTo(11L)); + assertThat(status.fetchExceptions().entrySet(), hasSize(1)); + final Map.Entry<Long, ElasticsearchException> entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(entry.getKey(), equalTo(0L)); + assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); + assertNotNull(entry.getValue().getCause()); + assertThat(entry.getValue().getCause(), instanceOf(ShardNotFoundException.class)); + final ShardNotFoundException cause = (ShardNotFoundException) entry.getValue().getCause(); + assertThat(cause.getShardId().getIndexName(), equalTo("leader_index")); + assertThat(cause.getShardId().getId(), equalTo(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + } + + public void testReceiveNonRetryableError() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + Exception failure = new RuntimeException("replication failed"); + readFailures.add(failure); + final AtomicBoolean invoked = new AtomicBoolean(); + // since there will be only one failure, this should only be invoked once and there should not be a fetch failure + beforeSendShardChangesRequest = status -> { + if (invoked.compareAndSet(false, true)) { + assertThat(status.numberOfFailedFetches(), equalTo(0L)); + assertThat(status.fetchExceptions().entrySet(), hasSize(0)); + } else { + fail("invoked twice"); + } + }; + task.coordinateReads(); + + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + assertTrue("task is stopped", task.isStopped()); + assertThat(fatalError, sameInstance(failure)); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.numberOfFailedFetches(), equalTo(1L)); + assertThat(status.fetchExceptions().entrySet(), hasSize(1)); + final Map.Entry<Long, ElasticsearchException> entry = status.fetchExceptions().entrySet().iterator().next(); + assertThat(entry.getKey(), equalTo(0L)); + assertThat(entry.getValue(), instanceOf(ElasticsearchException.class)); + assertNotNull(entry.getValue().getCause()); + assertThat(entry.getValue().getCause(), instanceOf(RuntimeException.class)); + final RuntimeException cause = (RuntimeException) entry.getValue().getCause(); + assertThat(cause.getMessage(), equalTo("replication failed")); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + } + + public void testHandleReadResponse() {
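+ // a successful read response is pushed straight into a bulk write request and reflected in the task status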
ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); + task.innerHandleReadResponse(0L, 63L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(1)); + assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.mappingVersion(), equalTo(0L)); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); + } + + public void testReceiveLessThanRequested() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + shardChangesRequests.clear(); + ShardChangesAction.Response response = generateShardChangesResponse(0, 20, 0L, 31L); + task.innerHandleReadResponse(0L, 63L, response); + + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(21L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(43L)); + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + } + + public void testCancelAndReceiveLessThanRequested() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + shardChangesRequests.clear(); + task.markAsCompleted(); + ShardChangesAction.Response response = generateShardChangesResponse(0, 31, 0L, 31L); + task.innerHandleReadResponse(0L, 64L, response); + + assertThat(shardChangesRequests.size(), equalTo(0)); + assertThat(bulkShardOperationRequests.size(), equalTo(0)); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(0)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + } + + public void testReceiveNothingExpectedSomething() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + shardChangesRequests.clear(); + task.innerHandleReadResponse(0L, 63L, new ShardChangesAction.Response(0, 0, 0, new Translog.Operation[0])); + + assertThat(shardChangesRequests.size(), equalTo(1)); +
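+ // nothing was received even though ops up to seqNo 63 were expected, so the same range is requested again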
assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + } + + public void testDelayCoordinatesRead() { + int[] counter = new int[]{0}; + scheduler = (delay, task) -> { + counter[0]++; + task.run(); + }; + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + shardChangesRequests.clear(); + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); + // Also invokes coordinateReads() + task.innerHandleReadResponse(0L, 63L, response); + task.innerHandleReadResponse(64L, 63L, + new ShardChangesAction.Response(0, 63L, 63L, new Translog.Operation[0])); + assertThat(counter[0], equalTo(1)); + } + + public void testMappingUpdate() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + mappingVersions.add(1L); + task.coordinateReads(); + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 1L, 63L); + task.handleReadResponse(0L, 63L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(1)); + assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.mappingVersion(), equalTo(1L)); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); + } + + public void testMappingUpdateRetryableError() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + int max = randomIntBetween(1, 10); + for (int i = 0; i < max; i++) { + mappingUpdateFailures.add(new ConnectException()); + } + mappingVersions.add(1L); + task.coordinateReads(); + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 1L, 63L); + task.handleReadResponse(0L, 63L, response); + + assertThat(mappingUpdateFailures.size(), equalTo(0)); + assertThat(bulkShardOperationRequests.size(), equalTo(1)); + assertThat(task.isStopped(), equalTo(false)); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.mappingVersion(), equalTo(1L)); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + + } + + public void testMappingUpdateRetryableErrorRetriedTooManyTimes() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + int max = randomIntBetween(11, 20); + for (int i = 0; i < max; i++) { + mappingUpdateFailures.add(new ConnectException()); + } + mappingVersions.add(1L); + 
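+ // the mapping update gives up after ten retries (eleven attempts), so the queued mapping version is never consumed and the task stops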
task.coordinateReads(); + ShardChangesAction.Response response = generateShardChangesResponse(0, 64, 1L, 64L); + task.handleReadResponse(0L, 64L, response); + + assertThat(mappingUpdateFailures.size(), equalTo(max - 11)); + assertThat(mappingVersions.size(), equalTo(1)); + assertThat(bulkShardOperationRequests.size(), equalTo(0)); + assertThat(task.isStopped(), equalTo(true)); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.mappingVersion(), equalTo(0L)); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + } + + public void testMappingUpdateNonRetryableError() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + mappingUpdateFailures.add(new RuntimeException()); + task.coordinateReads(); + ShardChangesAction.Response response = generateShardChangesResponse(0, 64, 1L, 64L); + task.handleReadResponse(0L, 64L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(0)); + assertThat(task.isStopped(), equalTo(true)); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.mappingVersion(), equalTo(0L)); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(0)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + } + + public void testCoordinateWrites() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); + // Also invokes coordinatesWrites() + task.innerHandleReadResponse(0L, 63L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(1)); + assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); + } + + public void testMaxConcurrentWrites() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 2, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardChangesAction.Response response = generateShardChangesResponse(0, 256, 0L, 256L); + // Also invokes coordinatesWrites() + task.innerHandleReadResponse(0L, 64L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(2)); + assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()).subList(0, 64))); + assertThat(bulkShardOperationRequests.get(1), equalTo(Arrays.asList(response.getOperations()).subList(64, 128))); + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentWrites(), equalTo(2)); + + task = createShardFollowTask(64, 1, 4, Integer.MAX_VALUE, Long.MAX_VALUE); + response = generateShardChangesResponse(0, 256, 0L, 256L); + // Also invokes 
coordinateWrites() + task.innerHandleReadResponse(0L, 64L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(4)); + assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()).subList(0, 64))); + assertThat(bulkShardOperationRequests.get(1), equalTo(Arrays.asList(response.getOperations()).subList(64, 128))); + assertThat(bulkShardOperationRequests.get(2), equalTo(Arrays.asList(response.getOperations()).subList(128, 192))); + assertThat(bulkShardOperationRequests.get(3), equalTo(Arrays.asList(response.getOperations()).subList(192, 256))); + + status = task.getStatus(); + assertThat(status.numberOfConcurrentWrites(), equalTo(4)); + } + + public void testMaxBatchOperationCount() { + ShardFollowNodeTask task = createShardFollowTask(8, 1, 32, Integer.MAX_VALUE, Long.MAX_VALUE); + ShardChangesAction.Response response = generateShardChangesResponse(0, 256, 0L, 256L); + // Also invokes coordinateWrites() + task.innerHandleReadResponse(0L, 64L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(32)); + for (int i = 0; i < 32; i += 8) { + int offset = i * 8; + assertThat(bulkShardOperationRequests.get(i), equalTo(Arrays.asList(response.getOperations()).subList(offset, offset + 8))); + } + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentWrites(), equalTo(32)); + } + + public void testRetryableError() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + int max = randomIntBetween(1, 10); + for (int i = 0; i < max; i++) { + writeFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); + } + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); + // Also invokes coordinateWrites() + task.innerHandleReadResponse(0L, 63L, response); + + // Number of requests is equal to initial request + retried attempts: + assertThat(bulkShardOperationRequests.size(), equalTo(max + 1)); + for (List<Translog.Operation> operations : bulkShardOperationRequests) { + assertThat(operations, equalTo(Arrays.asList(response.getOperations()))); + } + assertThat(task.isStopped(), equalTo(false)); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); + } + + public void testRetryableErrorRetriedTooManyTimes() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + int max = randomIntBetween(11, 32); + for (int i = 0; i < max; i++) { + writeFailures.add(new ShardNotFoundException(new ShardId("leader_index", "", 0))); + } + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); + // Also invokes coordinateWrites() + task.innerHandleReadResponse(0L, 63L, response); + + // Number of requests is equal to initial request + retried attempts: + assertThat(bulkShardOperationRequests.size(), equalTo(11)); + for (List<Translog.Operation> operations : bulkShardOperationRequests) { + assertThat(operations,
equalTo(Arrays.asList(response.getOperations()))); + } + assertThat(task.isStopped(), equalTo(true)); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); + } + + public void testNonRetryableError() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + writeFailures.add(new RuntimeException()); + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); + // Also invokes coordinatesWrites() + task.innerHandleReadResponse(0L, 63L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(1)); + assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); + assertThat(task.isStopped(), equalTo(true)); + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentWrites(), equalTo(1)); + assertThat(status.followerGlobalCheckpoint(), equalTo(-1L)); + } + + public void testMaxBatchBytesLimit() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 128, Integer.MAX_VALUE, 1L); + startTask(task, 64, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 64L); + // Also invokes coordinatesWrites() + task.innerHandleReadResponse(0L, 64L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(64)); + } + + public void testHandleWriteResponse() { + ShardFollowNodeTask task = createShardFollowTask(64, 1, 1, Integer.MAX_VALUE, Long.MAX_VALUE); + startTask(task, 63, -1); + + task.coordinateReads(); + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(0L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + shardChangesRequests.clear(); + followerGlobalCheckpoints.add(63L); + ShardChangesAction.Response response = generateShardChangesResponse(0, 63, 0L, 63L); + // Also invokes coordinatesWrites() + task.innerHandleReadResponse(0L, 63L, response); + + assertThat(bulkShardOperationRequests.size(), equalTo(1)); + assertThat(bulkShardOperationRequests.get(0), equalTo(Arrays.asList(response.getOperations()))); + + // handleWrite() also delegates to coordinateReads + assertThat(shardChangesRequests.size(), equalTo(1)); + assertThat(shardChangesRequests.get(0)[0], equalTo(64L)); + assertThat(shardChangesRequests.get(0)[1], equalTo(64L)); + + ShardFollowNodeTask.Status status = task.getStatus(); + assertThat(status.numberOfConcurrentReads(), equalTo(1)); + assertThat(status.lastRequestedSeqNo(), equalTo(63L)); + assertThat(status.leaderGlobalCheckpoint(), equalTo(63L)); + assertThat(status.followerGlobalCheckpoint(), equalTo(63L)); + } + + ShardFollowNodeTask createShardFollowTask(int maxBatchOperationCount, int maxConcurrentReadBatches, int maxConcurrentWriteBatches, + int bufferWriteLimit, long maxBatchSizeInBytes) { + AtomicBoolean stopped = new AtomicBoolean(false); + ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), + new ShardId("leader_index", "", 0), 
maxBatchOperationCount, maxConcurrentReadBatches, maxBatchSizeInBytes, + maxConcurrentWriteBatches, bufferWriteLimit, TimeValue.ZERO, TimeValue.ZERO, Collections.emptyMap()); + + shardChangesRequests = new ArrayList<>(); + bulkShardOperationRequests = new ArrayList<>(); + readFailures = new LinkedList<>(); + writeFailures = new LinkedList<>(); + mappingUpdateFailures = new LinkedList<>(); + mappingVersions = new LinkedList<>(); + leaderGlobalCheckpoints = new LinkedList<>(); + followerGlobalCheckpoints = new LinkedList<>(); + maxSeqNos = new LinkedList<>(); + return new ShardFollowNodeTask( + 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { + + @Override + protected void innerUpdateMapping(LongConsumer handler, Consumer<Exception> errorHandler) { + Exception failure = mappingUpdateFailures.poll(); + if (failure != null) { + errorHandler.accept(failure); + return; + } + + final Long mappingVersion = mappingVersions.poll(); + if (mappingVersion != null) { + handler.accept(mappingVersion); + } + } + + @Override + protected void innerSendBulkShardOperationsRequest( + final List<Translog.Operation> operations, + final Consumer<BulkShardOperationsResponse> handler, + final Consumer<Exception> errorHandler) { + bulkShardOperationRequests.add(operations); + Exception writeFailure = ShardFollowNodeTaskTests.this.writeFailures.poll(); + if (writeFailure != null) { + errorHandler.accept(writeFailure); + return; + } + Long followerGlobalCheckpoint = followerGlobalCheckpoints.poll(); + if (followerGlobalCheckpoint != null) { + final BulkShardOperationsResponse response = new BulkShardOperationsResponse(); + response.setGlobalCheckpoint(followerGlobalCheckpoint); + response.setMaxSeqNo(followerGlobalCheckpoint); + handler.accept(response); + } + } + + @Override + protected void innerSendShardChangesRequest(long from, int requestBatchSize, Consumer<ShardChangesAction.Response> handler, + Consumer<Exception> errorHandler) { + beforeSendShardChangesRequest.accept(getStatus()); + shardChangesRequests.add(new long[]{from, requestBatchSize}); + Exception readFailure = ShardFollowNodeTaskTests.this.readFailures.poll(); + if (readFailure != null) { + errorHandler.accept(readFailure); + } else if (simulateResponse.get()) { + final Translog.Operation[] operations = new Translog.Operation[requestBatchSize]; + for (int i = 0; i < requestBatchSize; i++) { + operations[i] = new Translog.NoOp(from + i, 0, "test"); + } + final ShardChangesAction.Response response = + new ShardChangesAction.Response( + mappingVersions.poll(), + leaderGlobalCheckpoints.poll(), + maxSeqNos.poll(), + operations); + handler.accept(response); + } + } + + @Override + protected boolean isStopped() { + return stopped.get(); + } + + @Override + public void markAsCompleted() { + stopped.set(true); + } + + @Override + public void markAsFailed(Exception e) { + fatalError = e; + stopped.set(true); + } + }; + } + + private static ShardChangesAction.Response generateShardChangesResponse(long fromSeqNo, long toSeqNo, long mappingVersion, + long leaderGlobalCheckPoint) { + List<Translog.Operation> ops = new ArrayList<>(); + for (long seqNo = fromSeqNo; seqNo <= toSeqNo; seqNo++) { + String id = UUIDs.randomBase64UUID(); + byte[] source = "{}".getBytes(StandardCharsets.UTF_8); + ops.add(new Translog.Index("doc", id, seqNo, 0, source)); + } + return new ShardChangesAction.Response( + mappingVersion, leaderGlobalCheckPoint, leaderGlobalCheckPoint, ops.toArray(new Translog.Operation[0])); + } + + void startTask(ShardFollowNodeTask task, long leaderGlobalCheckpoint, long followerGlobalCheckpoint) { + // The call to
updateMapping is a noop, so nothing happens. + task.start(leaderGlobalCheckpoint, leaderGlobalCheckpoint, followerGlobalCheckpoint, followerGlobalCheckpoint); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java new file mode 100644 index 00000000000..ec180943a3b --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -0,0 +1,272 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import com.carrotsearch.hppc.LongHashSet; +import com.carrotsearch.hppc.LongSet; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.delete.DeleteRequest; +import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.Engine.Operation.Origin; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.replication.ESIndexLevelReplicationTestCase; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsRequest; +import org.elasticsearch.xpack.ccr.action.bulk.BulkShardOperationsResponse; +import org.elasticsearch.xpack.ccr.action.bulk.TransportBulkShardOperationsAction; +import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.LongConsumer; + +import static org.hamcrest.Matchers.equalTo; + +public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTestCase { + + public void testSimpleCcrReplication() throws Exception { + try (ReplicationGroup leaderGroup = createGroup(randomInt(2)); + ReplicationGroup followerGroup = createFollowGroup(randomInt(2))) { + leaderGroup.startAll(); + int docCount = leaderGroup.appendDocs(randomInt(64)); + leaderGroup.assertAllEqual(docCount); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( + leaderSeqNoStats.getGlobalCheckpoint(), + leaderSeqNoStats.getMaxSeqNo(), + followerSeqNoStats.getGlobalCheckpoint(), +
followerSeqNoStats.getMaxSeqNo()); + docCount += leaderGroup.appendDocs(randomInt(128)); + leaderGroup.syncGlobalCheckpoint(); + leaderGroup.assertAllEqual(docCount); + Set indexedDocIds = getShardDocUIDs(leaderGroup.getPrimary()); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size()); + }); + // Deletes should be replicated to the follower + List deleteDocIds = randomSubsetOf(indexedDocIds); + for (String deleteId : deleteDocIds) { + BulkItemResponse resp = leaderGroup.delete(new DeleteRequest(index.getName(), "type", deleteId)); + assertThat(resp.getResponse().getResult(), equalTo(DocWriteResponse.Result.DELETED)); + } + leaderGroup.syncGlobalCheckpoint(); + assertBusy(() -> { + assertThat(followerGroup.getPrimary().getGlobalCheckpoint(), equalTo(leaderGroup.getPrimary().getGlobalCheckpoint())); + followerGroup.assertAllEqual(indexedDocIds.size() - deleteDocIds.size()); + }); + shardFollowTask.markAsCompleted(); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup); + } + } + + public void testFailLeaderReplicaShard() throws Exception { + try (ReplicationGroup leaderGroup = createGroup(1 + randomInt(1)); + ReplicationGroup followerGroup = createFollowGroup(randomInt(2))) { + leaderGroup.startAll(); + followerGroup.startAll(); + ShardFollowNodeTask shardFollowTask = createShardFollowTask(leaderGroup, followerGroup); + final SeqNoStats leaderSeqNoStats = leaderGroup.getPrimary().seqNoStats(); + final SeqNoStats followerSeqNoStats = followerGroup.getPrimary().seqNoStats(); + shardFollowTask.start( + leaderSeqNoStats.getGlobalCheckpoint(), + leaderSeqNoStats.getMaxSeqNo(), + followerSeqNoStats.getGlobalCheckpoint(), + followerSeqNoStats.getMaxSeqNo()); + int docCount = 256; + leaderGroup.appendDocs(1); + Runnable task = () -> { + try { + leaderGroup.appendDocs(docCount - 1); + leaderGroup.syncGlobalCheckpoint(); + } catch (Exception e) { + throw new AssertionError(e); + } + }; + Thread thread = new Thread(task); + thread.start(); + + // Remove and add a new replica + IndexShard luckyReplica = randomFrom(leaderGroup.getReplicas()); + leaderGroup.removeReplica(luckyReplica); + luckyReplica.close("stop replica", false); + luckyReplica.store().close(); + leaderGroup.addReplica(); + leaderGroup.startReplicas(1); + thread.join(); + + leaderGroup.assertAllEqual(docCount); + assertBusy(() -> followerGroup.assertAllEqual(docCount)); + shardFollowTask.markAsCompleted(); + assertConsistentHistoryBetweenLeaderAndFollower(leaderGroup, followerGroup); + } + } + + @Override + protected ReplicationGroup createGroup(int replicas, Settings settings) throws IOException { + Settings newSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), 10000) + .put(settings) + .build(); + if (CcrSettings.CCR_FOLLOWING_INDEX_SETTING.get(newSettings)) { + IndexMetaData metaData = buildIndexMetaData(replicas, newSettings, indexMapping); + return new ReplicationGroup(metaData) { + + @Override + protected EngineFactory getEngineFactory(ShardRouting routing) { + return new FollowingEngineFactory(); + } + }; + } else { + return super.createGroup(replicas, newSettings); + } + } + + 
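+ // Builds a replication group for a following index: with the CCR following-index setting enabled, createGroup above wires in the FollowingEngine.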
private ReplicationGroup createFollowGroup(int replicas) throws IOException { + Settings.Builder settingsBuilder = Settings.builder(); + settingsBuilder.put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true); + return createGroup(replicas, settingsBuilder.build()); + } + + private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, ReplicationGroup followerGroup) { + ShardFollowTask params = new ShardFollowTask(null, new ShardId("follow_index", "", 0), + new ShardId("leader_index", "", 0), between(1, 64), between(1, 8), Long.MAX_VALUE, between(1, 4), 10240, + TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(10), Collections.emptyMap()); + + BiConsumer scheduler = (delay, task) -> threadPool.schedule(delay, ThreadPool.Names.GENERIC, task); + AtomicBoolean stopped = new AtomicBoolean(false); + LongSet fetchOperations = new LongHashSet(); + return new ShardFollowNodeTask( + 1L, "type", ShardFollowTask.NAME, "description", null, Collections.emptyMap(), params, scheduler, System::nanoTime) { + @Override + protected synchronized void onOperationsFetched(Translog.Operation[] operations) { + super.onOperationsFetched(operations); + for (Translog.Operation operation : operations) { + if (fetchOperations.add(operation.seqNo()) == false) { + throw new AssertionError("Operation [" + operation + " ] was fetched already"); + } + } + } + + @Override + protected void innerUpdateMapping(LongConsumer handler, Consumer errorHandler) { + // noop, as mapping updates are not tested + handler.accept(1L); + } + + @Override + protected void innerSendBulkShardOperationsRequest( + final List operations, + final Consumer handler, + final Consumer errorHandler) { + Runnable task = () -> { + BulkShardOperationsRequest request = new BulkShardOperationsRequest(params.getFollowShardId(), operations); + ActionListener listener = ActionListener.wrap(handler::accept, errorHandler); + new CCRAction(request, listener, followerGroup).execute(); + }; + threadPool.executor(ThreadPool.Names.GENERIC).execute(task); + } + + @Override + protected void innerSendShardChangesRequest(long from, int maxOperationCount, Consumer handler, + Consumer errorHandler) { + Runnable task = () -> { + List indexShards = new ArrayList<>(leaderGroup.getReplicas()); + indexShards.add(leaderGroup.getPrimary()); + Collections.shuffle(indexShards, random()); + + Exception exception = null; + for (IndexShard indexShard : indexShards) { + final SeqNoStats seqNoStats = indexShard.seqNoStats(); + try { + Translog.Operation[] ops = ShardChangesAction.getOperations(indexShard, seqNoStats.getGlobalCheckpoint(), from, + maxOperationCount, params.getMaxBatchSizeInBytes()); + // hard code mapping version; this is ok, as mapping updates are not tested here + final ShardChangesAction.Response response = + new ShardChangesAction.Response(1L, seqNoStats.getGlobalCheckpoint(), seqNoStats.getMaxSeqNo(), ops); + handler.accept(response); + return; + } catch (Exception e) { + exception = e; + } + } + assert exception != null; + errorHandler.accept(exception); + }; + threadPool.executor(ThreadPool.Names.GENERIC).execute(task); + } + + @Override + protected boolean isStopped() { + return stopped.get(); + } + + @Override + public void markAsCompleted() { + stopped.set(true); + } + + @Override + public void markAsFailed(Exception e) { + stopped.set(true); + } + + }; + } + + private void assertConsistentHistoryBetweenLeaderAndFollower(ReplicationGroup leader, ReplicationGroup follower) throws IOException { + int totalOps = 
leader.getPrimary().estimateNumberOfHistoryOperations("test", 0); + for (IndexShard followingShard : follower) { + assertThat(followingShard.estimateNumberOfHistoryOperations("test", 0), equalTo(totalOps)); + } + } + + class CCRAction extends ReplicationAction { + + CCRAction(BulkShardOperationsRequest request, ActionListener listener, ReplicationGroup group) { + super(request, listener, group, "ccr"); + } + + @Override + protected PrimaryResult performOnPrimary(IndexShard primary, BulkShardOperationsRequest request) throws Exception { + TransportWriteAction.WritePrimaryResult result = + TransportBulkShardOperationsAction.shardOperationOnPrimary(primary.shardId(), request.getOperations(), + primary, logger); + return new PrimaryResult(result.replicaRequest(), result.finalResponseIfSuccessful); + } + + @Override + protected void performOnReplica(BulkShardOperationsRequest request, IndexShard replica) throws Exception { + TransportBulkShardOperationsAction.applyTranslogOperations(request.getOperations(), replica, Origin.REPLICA); + } + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java new file mode 100644 index 00000000000..300794a6c00 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskTests.java @@ -0,0 +1,44 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Collections; + +public class ShardFollowTaskTests extends AbstractSerializingTestCase { + + @Override + protected ShardFollowTask doParseInstance(XContentParser parser) throws IOException { + return ShardFollowTask.fromXContent(parser); + } + + @Override + protected ShardFollowTask createTestInstance() { + return new ShardFollowTask( + randomAlphaOfLength(4), + new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), + new ShardId(randomAlphaOfLength(4), randomAlphaOfLength(4), randomInt(5)), + randomIntBetween(1, Integer.MAX_VALUE), + randomIntBetween(1, Integer.MAX_VALUE), + randomNonNegativeLong(), + randomIntBetween(1, Integer.MAX_VALUE), + randomIntBetween(1, Integer.MAX_VALUE), + TimeValue.parseTimeValue(randomTimeValue(), ""), + TimeValue.parseTimeValue(randomTimeValue(), ""), + randomBoolean() ? null : Collections.singletonMap("key", "value")); + } + + @Override + protected Writeable.Reader instanceReader() { + return ShardFollowTask::new; + } +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java new file mode 100644 index 00000000000..4c6c0c060e4 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsTests.java @@ -0,0 +1,79 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.ccr.action.bulk; + +import org.apache.lucene.index.Term; +import org.elasticsearch.action.support.replication.TransportWriteAction; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.xpack.ccr.CcrSettings; +import org.elasticsearch.xpack.ccr.index.engine.FollowingEngineFactory; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +public class BulkShardOperationsTests extends IndexShardTestCase { + + private static final byte[] SOURCE = "{}".getBytes(StandardCharsets.UTF_8); + + // test that we use the primary term on the follower when applying operations from the leader + public void testPrimaryTermFromFollower() throws IOException { + final Settings settings = Settings.builder().put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true).build(); + final IndexShard followerPrimary = newStartedShard(true, settings, new FollowingEngineFactory()); + + // we use this primary on the operations yet we expect the applied operations to have the primary term of the follower + final long primaryTerm = randomLongBetween(1, Integer.MAX_VALUE); + + int numOps = randomIntBetween(0, 127); + final List operations = new ArrayList<>(randomIntBetween(0, 127)); + for (int i = 0; i < numOps; i++) { + final String id = Integer.toString(i); + final long seqNo = i; + final Translog.Operation.Type type = + randomValueOtherThan(Translog.Operation.Type.CREATE, () -> randomFrom(Translog.Operation.Type.values())); + switch (type) { + case INDEX: + operations.add(new Translog.Index("_doc", id, seqNo, primaryTerm, 0, SOURCE, null, -1)); + break; + case DELETE: + operations.add( + new Translog.Delete("_doc", id, new Term("_id", Uid.encodeId(id)), seqNo, primaryTerm, 0)); + break; + case NO_OP: + operations.add(new Translog.NoOp(seqNo, primaryTerm, "test")); + break; + default: + throw new IllegalStateException("unexpected operation type [" + type + "]"); + } + } + + final TransportWriteAction.WritePrimaryResult result = + TransportBulkShardOperationsAction.shardOperationOnPrimary(followerPrimary.shardId(), operations, followerPrimary, logger); + + try (Translog.Snapshot snapshot = followerPrimary.getHistoryOperations("test", 0)) { + assertThat(snapshot.totalOperations(), equalTo(operations.size())); + Translog.Operation operation; + while ((operation = snapshot.next()) != null) { + assertThat(operation.primaryTerm(), equalTo(followerPrimary.getOperationPrimaryTerm())); + } + } + + for (final Translog.Operation operation : result.replicaRequest().getOperations()) { + assertThat(operation.primaryTerm(), equalTo(followerPrimary.getOperationPrimaryTerm())); + } + + closeShards(followerPrimary); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java new file mode 100644 index 00000000000..e14b7513035 --- /dev/null +++ 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.index.engine; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.mapper.SourceToParse; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.ccr.CcrSettings; + +import java.util.Collections; +import java.util.concurrent.CountDownLatch; + +import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting; +import static org.hamcrest.Matchers.equalTo; + +public class FollowEngineIndexShardTests extends IndexShardTestCase { + + public void testDoNotFillGaps() throws Exception { + Settings settings = Settings.builder() + .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true) + .build(); + final IndexShard indexShard = newStartedShard(false, settings, new FollowingEngineFactory()); + + long seqNo = -1; + for (int i = 0; i < 8; i++) { + final String id = Long.toString(i); + SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "_doc", id, + new BytesArray("{}"), XContentType.JSON); + indexShard.applyIndexOperationOnReplica(++seqNo, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse); + } + long seqNoBeforeGap = seqNo; + seqNo += 8; + SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "_doc", "9", + new BytesArray("{}"), XContentType.JSON); + indexShard.applyIndexOperationOnReplica(seqNo, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse); + + // promote the replica to primary: + final ShardRouting replicaRouting = indexShard.routingEntry(); + final ShardRouting primaryRouting = + newShardRouting( + replicaRouting.shardId(), + replicaRouting.currentNodeId(), + null, + true, + ShardRoutingState.STARTED, + replicaRouting.allocationId()); + indexShard.updateShardState(primaryRouting, indexShard.getOperationPrimaryTerm() + 1, (shard, listener) -> {}, + 0L, Collections.singleton(primaryRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); + + final CountDownLatch latch = new CountDownLatch(1); + ActionListener actionListener = ActionListener.wrap(releasable -> { + releasable.close(); + latch.countDown(); + }, e -> {assert false : "expected no exception, but got [" + e.getMessage() + "]";}); + indexShard.acquirePrimaryOperationPermit(actionListener, ThreadPool.Names.GENERIC, ""); + latch.await(); + assertThat(indexShard.getLocalCheckpoint(), equalTo(seqNoBeforeGap)); + indexShard.refresh("test"); + assertThat(indexShard.docStats().getCount(), equalTo(9L)); + 
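+ // all nine documents (eight below the gap plus the one above it) are visible, even though the gap itself was never filled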
closeShards(indexShard); + } + +} diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java new file mode 100644 index 00000000000..677b8955490 --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowingEngineTests.java @@ -0,0 +1,332 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.index.engine; + +import org.apache.logging.log4j.Logger; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.store.Directory; +import org.elasticsearch.Version; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineTestCase; +import org.elasticsearch.index.engine.TranslogHandler; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogConfig; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasToString; + +public class FollowingEngineTests extends ESTestCase { + + private ThreadPool threadPool; + private Index index; + private ShardId shardId; + private AtomicLong primaryTerm = new AtomicLong(); + + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("following-engine-tests"); + index = new Index("index", "uuid"); + shardId = new 
ShardId(index, 0); + primaryTerm.set(randomLongBetween(1, Long.MAX_VALUE)); + } + + public void tearDown() throws Exception { + terminate(threadPool); + super.tearDown(); + } + + public void testFollowingEngineRejectsNonFollowingIndex() throws IOException { + final Settings.Builder builder = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT); + if (randomBoolean()) { + builder.put("index.xpack.ccr.following_index", false); + } + final Settings settings = builder.build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new FollowingEngine(engineConfig)); + assertThat(e, hasToString(containsString("a following engine can not be constructed for a non-following index"))); + } + } + + public void testIndexSeqNoIsMaintained() throws IOException { + final long seqNo = randomIntBetween(0, Integer.MAX_VALUE); + runIndexTest( + seqNo, + Engine.Operation.Origin.PRIMARY, + (followingEngine, index) -> { + final Engine.IndexResult result = followingEngine.index(index); + assertThat(result.getSeqNo(), equalTo(seqNo)); + }); + } + + /* + * A following engine (whether or not it is an engine for a primary or replica shard) needs to maintain ordering semantics as the + * operations presented to it can arrive out of order (while a leader engine that is for a primary shard dictates the order). This test + * ensures that these semantics are maintained. 
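+ * For example (a hypothetical arrival order), the operation with sequence number 2 may be presented before the operation with sequence number 1; the engine must apply both against the sequence numbers assigned by the leader so that the final state converges regardless of arrival order.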
+ */ + public void testOutOfOrderDocuments() throws IOException { + final Settings settings = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put("index.xpack.ccr.following_index", true) + .build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { + final VersionType versionType = + randomFrom(VersionType.INTERNAL, VersionType.EXTERNAL, VersionType.EXTERNAL_GTE, VersionType.FORCE); + final List ops = EngineTestCase.generateSingleDocHistory(true, versionType, 2, 2, 20, "id"); + EngineTestCase.assertOpsOnReplica(ops, followingEngine, true, logger); + } + } + } + + public void runIndexTest( + final long seqNo, + final Engine.Operation.Origin origin, + final CheckedBiConsumer consumer) throws IOException { + final Settings settings = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put("index.xpack.ccr.following_index", true) + .build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { + final Engine.Index index = createIndexOp("id", seqNo, origin); + consumer.accept(followingEngine, index); + } + } + } + + public void testDeleteSeqNoIsMaintained() throws IOException { + final long seqNo = randomIntBetween(0, Integer.MAX_VALUE); + runDeleteTest( + seqNo, + Engine.Operation.Origin.PRIMARY, + (followingEngine, delete) -> { + final Engine.DeleteResult result = followingEngine.delete(delete); + assertThat(result.getSeqNo(), equalTo(seqNo)); + }); + } + + public void runDeleteTest( + final long seqNo, + final Engine.Operation.Origin origin, + final CheckedBiConsumer consumer) throws IOException { + final Settings settings = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put("index.xpack.ccr.following_index", true) + .build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { + final String id = "id"; + final Engine.Delete delete = new Engine.Delete( + "type", + id, + new Term("_id", id), + seqNo, + primaryTerm.get(), + randomNonNegativeLong(), + VersionType.EXTERNAL, + origin, + System.currentTimeMillis()); + + consumer.accept(followingEngine, delete); + } + } + } + + public void 
testDoNotFillSeqNoGaps() throws Exception { + final Settings settings = + Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + .put("index.version.created", Version.CURRENT) + .put("index.xpack.ccr.following_index", true) + .build(); + final IndexMetaData indexMetaData = IndexMetaData.builder(index.getName()).settings(settings).build(); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings); + try (Store store = createStore(shardId, indexSettings, newDirectory())) { + final EngineConfig engineConfig = engineConfig(shardId, indexSettings, threadPool, store, logger, xContentRegistry()); + try (FollowingEngine followingEngine = createEngine(store, engineConfig)) { + followingEngine.index(createIndexOp("id", 128, Engine.Operation.Origin.PRIMARY)); + int addedNoops = followingEngine.fillSeqNoGaps(primaryTerm.get()); + assertThat(addedNoops, equalTo(0)); + } + } + } + + private EngineConfig engineConfig( + final ShardId shardId, + final IndexSettings indexSettings, + final ThreadPool threadPool, + final Store store, + final Logger logger, + final NamedXContentRegistry xContentRegistry) throws IOException { + final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); + final Path translogPath = createTempDir("translog"); + final TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + return new EngineConfig( + shardId, + "allocation-id", + threadPool, + indexSettings, + null, + store, + newMergePolicy(), + indexWriterConfig.getAnalyzer(), + indexWriterConfig.getSimilarity(), + new CodecService(null, logger), + new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, Exception e) { + + } + }, + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + translogConfig, + TimeValue.timeValueMinutes(5), + Collections.emptyList(), + Collections.emptyList(), + null, + new TranslogHandler( + xContentRegistry, IndexSettingsModule.newIndexSettings(shardId.getIndexName(), indexSettings.getSettings())), + new NoneCircuitBreakerService(), + () -> SequenceNumbers.NO_OPS_PERFORMED, + () -> primaryTerm.get(), + EngineTestCase.tombstoneDocSupplier() + ); + } + + private static Store createStore( + final ShardId shardId, final IndexSettings indexSettings, final Directory directory) throws IOException { + final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { + @Override + public Directory newDirectory() throws IOException { + return directory; + } + }; + return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); + } + + private FollowingEngine createEngine(Store store, EngineConfig config) throws IOException { + store.createEmpty(); + final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, shardId, 1L); + store.associateIndexWithNewTranslog(translogUuid); + FollowingEngine followingEngine = new FollowingEngine(config); + followingEngine.recoverFromTranslog(Long.MAX_VALUE); + return followingEngine; + } + + private Engine.Index createIndexOp(String id, long seqNo, Engine.Operation.Origin origin) { + final Field uidField = new Field("_id", id, IdFieldMapper.Defaults.FIELD_TYPE); + final String type = "type"; + final Field versionField = new NumericDocValuesField("_version", 0); + final SeqNoFieldMapper.SequenceIDFields seqID = 
SeqNoFieldMapper.SequenceIDFields.emptySeqID(); + final ParseContext.Document document = new ParseContext.Document(); + document.add(uidField); + document.add(versionField); + document.add(seqID.seqNo); + document.add(seqID.seqNoDocValue); + document.add(seqID.primaryTerm); + final BytesReference source = new BytesArray(new byte[]{1}); + final ParsedDocument parsedDocument = new ParsedDocument( + versionField, + seqID, + id, + type, + "routing", + Collections.singletonList(document), + source, + XContentType.JSON, + null); + + final long version; + final long autoGeneratedIdTimestamp; + if (randomBoolean()) { + version = 1; + autoGeneratedIdTimestamp = System.currentTimeMillis(); + } else { + version = randomNonNegativeLong(); + autoGeneratedIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP; + } + return new Engine.Index( + new Term("_id", parsedDocument.id()), + parsedDocument, + seqNo, + primaryTerm.get(), + version, + VersionType.EXTERNAL, + origin, + System.currentTimeMillis(), + autoGeneratedIdTimestamp, + randomBoolean()); + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java index 043224e357b..e7460d5a2eb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RemoteClusterLicenseChecker.java @@ -119,7 +119,7 @@ public final class RemoteClusterLicenseChecker { } private final Client client; - private final Predicate predicate; + private final Predicate predicate; /** * Constructs a remote cluster license checker with the specified license predicate for checking license compatibility. The predicate @@ -128,7 +128,7 @@ public final class RemoteClusterLicenseChecker { * @param client the client * @param predicate the license predicate */ - public RemoteClusterLicenseChecker(final Client client, final Predicate predicate) { + public RemoteClusterLicenseChecker(final Client client, final Predicate predicate) { this.client = client; this.predicate = predicate; } @@ -159,7 +159,8 @@ public final class RemoteClusterLicenseChecker { @Override public void onResponse(final XPackInfoResponse xPackInfoResponse) { final XPackInfoResponse.LicenseInfo licenseInfo = xPackInfoResponse.getLicenseInfo(); - if ((licenseInfo.getStatus() == LicenseStatus.ACTIVE) == false || predicate.test(licenseInfo) == false) { + if ((licenseInfo.getStatus() == LicenseStatus.ACTIVE) == false + || predicate.test(License.OperationMode.resolve(licenseInfo.getMode())) == false) { listener.onResponse(LicenseCheck.failure(new RemoteClusterLicenseInfo(clusterAlias.get(), licenseInfo))); return; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java index 8afacbf0168..4069323cd4b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/XPackLicenseState.java @@ -524,13 +524,13 @@ public class XPackLicenseState { * {@code false}. 
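* (implementation note: the check performs a one-time volatile read of the license status and then tests the operation mode via a static helper, so the result stays consistent even if the license is updated concurrently)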
*/ public boolean isMachineLearningAllowed() { - // status is volatile - Status localStatus = status; - OperationMode operationMode = localStatus.mode; + // one-time volatile read as status could be updated on us while performing this check + final Status currentStatus = status; + return currentStatus.active && isMachineLearningAllowedForOperationMode(currentStatus.mode); + } - boolean licensed = operationMode == OperationMode.TRIAL || operationMode == OperationMode.PLATINUM; - - return licensed && localStatus.active; + public static boolean isMachineLearningAllowedForOperationMode(final OperationMode operationMode) { + return isPlatinumOrTrialOperationMode(operationMode); } /** @@ -638,4 +638,30 @@ public class XPackLicenseState { final OperationMode mode = status.mode; return mode == OperationMode.TRIAL ? (isSecurityExplicitlyEnabled || isSecurityEnabledByTrialVersion) : isSecurityEnabled; } + + /** + * Determine if cross-cluster replication should be enabled. + *
<p> + * Cross-cluster replication is only disabled when the license has expired or if the mode is not: + * <ul> + * <li>{@link OperationMode#PLATINUM}</li> + * <li>{@link OperationMode#TRIAL}</li> + * </ul>
+ * + * @return true is the license is compatible, otherwise false + */ + public boolean isCcrAllowed() { + // one-time volatile read as status could be updated on us while performing this check + final Status currentStatus = status; + return currentStatus.active && isCcrAllowedForOperationMode(currentStatus.mode); + } + + public static boolean isCcrAllowedForOperationMode(final OperationMode operationMode) { + return isPlatinumOrTrialOperationMode(operationMode); + } + + public static boolean isPlatinumOrTrialOperationMode(final OperationMode operationMode) { + return operationMode == OperationMode.PLATINUM || operationMode == OperationMode.TRIAL; + } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java index 8559ab0703b..223b7f00807 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReader.java @@ -35,6 +35,7 @@ import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -70,7 +71,11 @@ public final class FieldSubsetReader extends FilterLeafReader { super(in, new FilterDirectoryReader.SubReaderWrapper() { @Override public LeafReader wrap(LeafReader reader) { - return new FieldSubsetReader(reader, filter); + try { + return new FieldSubsetReader(reader, filter); + } catch (IOException e) { + throw new UncheckedIOException(e); + } } }); this.filter = filter; @@ -109,11 +114,13 @@ public final class FieldSubsetReader extends FilterLeafReader { private final FieldInfos fieldInfos; /** An automaton that only accepts authorized fields. */ private final CharacterRunAutomaton filter; + /** {@link Terms} cache with filtered stats for the {@link FieldNamesFieldMapper} field. */ + private final Terms fieldNamesFilterTerms; /** * Wrap a single segment, exposing a subset of its fields. */ - FieldSubsetReader(LeafReader in, CharacterRunAutomaton filter) { + FieldSubsetReader(LeafReader in, CharacterRunAutomaton filter) throws IOException { super(in); ArrayList filteredInfos = new ArrayList<>(); for (FieldInfo fi : in.getFieldInfos()) { @@ -123,6 +130,8 @@ public final class FieldSubsetReader extends FilterLeafReader { } fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()])); this.filter = filter; + final Terms fieldNameTerms = super.terms(FieldNamesFieldMapper.NAME); + this.fieldNamesFilterTerms = fieldNameTerms == null ? null : new FieldNamesTerms(fieldNameTerms); } /** returns true if this field is allowed. */ @@ -346,21 +355,14 @@ public final class FieldSubsetReader extends FilterLeafReader { } } - private Terms wrapTerms(Terms terms, String field) { + private Terms wrapTerms(Terms terms, String field) throws IOException { if (!hasField(field)) { return null; } else if (FieldNamesFieldMapper.NAME.equals(field)) { // for the _field_names field, fields for the document // are encoded as postings, where term is the field. // so we hide terms for fields we filter out. 
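// (the filtered terms, with statistics recomputed over the visible fields only, are built once per segment in the constructor and cached as fieldNamesFilterTerms, replacing the per-call wrapping below)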
- if (terms != null) { - // check for null, in case term dictionary is not a ghostbuster - // So just because its in fieldinfos and "indexed=true" doesn't mean you can go grab a Terms for it. - // It just means at one point there was a document with that field indexed... - // The fields infos isn't updates/removed even if no docs refer to it - terms = new FieldNamesTerms(terms); - } - return terms; + return fieldNamesFilterTerms; } else { return terms; } @@ -371,9 +373,22 @@ public final class FieldSubsetReader extends FilterLeafReader { * representing fields that should not be visible in this reader. */ class FieldNamesTerms extends FilterTerms { + final long size; + final long sumDocFreq; - FieldNamesTerms(Terms in) { + FieldNamesTerms(Terms in) throws IOException { super(in); + assert in.hasFreqs() == false; + // re-compute the stats for the field to take + // into account the filtered terms. + final TermsEnum e = iterator(); + long size = 0, sumDocFreq = 0; + while (e.next() != null) { + size ++; + sumDocFreq += e.docFreq(); + } + this.size = size; + this.sumDocFreq = sumDocFreq; } @Override @@ -381,27 +396,20 @@ public final class FieldSubsetReader extends FilterLeafReader { return new FieldNamesTermsEnum(in.iterator()); } - // we don't support field statistics (since we filter out terms) - // but this isn't really a big deal: _field_names is not used for ranking. - @Override - public int getDocCount() throws IOException { - return -1; + public long size() throws IOException { + return size; } @Override public long getSumDocFreq() throws IOException { - return -1; + return sumDocFreq; } @Override - public long getSumTotalTermFreq() throws IOException { - return -1; - } - - @Override - public long size() throws IOException { - return -1; + public int getDocCount() throws IOException { + // it is costly to recompute this value so we assume that docCount == maxDoc. 
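+ // (this can overestimate when some documents lack the _field_names field, which is acceptable because _field_names is not used for ranking)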
+ return maxDoc(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java index 068c722c778..6c52d3e75dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ClusterPrivilege.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authz.privilege; import org.apache.lucene.util.automaton.Automaton; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; @@ -41,6 +42,7 @@ public final class ClusterPrivilege extends Privilege { private static final Automaton MANAGE_IDX_TEMPLATE_AUTOMATON = patterns("indices:admin/template/*"); private static final Automaton MANAGE_INGEST_PIPELINE_AUTOMATON = patterns("cluster:admin/ingest/pipeline/*"); private static final Automaton MANAGE_ROLLUP_AUTOMATON = patterns("cluster:admin/xpack/rollup/*", "cluster:monitor/xpack/rollup/*"); + private static final Automaton MANAGE_CCR_AUTOMATON = patterns("cluster:admin/xpack/ccr/*", ClusterStateAction.NAME); public static final ClusterPrivilege NONE = new ClusterPrivilege("none", Automatons.EMPTY); public static final ClusterPrivilege ALL = new ClusterPrivilege("all", ALL_CLUSTER_AUTOMATON); @@ -60,6 +62,7 @@ public final class ClusterPrivilege extends Privilege { public static final ClusterPrivilege MANAGE_SECURITY = new ClusterPrivilege("manage_security", MANAGE_SECURITY_AUTOMATON); public static final ClusterPrivilege MANAGE_SAML = new ClusterPrivilege("manage_saml", MANAGE_SAML_AUTOMATON); public static final ClusterPrivilege MANAGE_PIPELINE = new ClusterPrivilege("manage_pipeline", "cluster:admin/ingest/pipeline/*"); + public static final ClusterPrivilege MANAGE_CCR = new ClusterPrivilege("manage_ccr", MANAGE_CCR_AUTOMATON); public static final Predicate ACTION_MATCHER = ClusterPrivilege.ALL.predicate(); @@ -80,6 +83,7 @@ public final class ClusterPrivilege extends Privilege { .put("manage_saml", MANAGE_SAML) .put("manage_pipeline", MANAGE_PIPELINE) .put("manage_rollup", MANAGE_ROLLUP) + .put("manage_ccr", MANAGE_CCR) .immutableMap(); private static final ConcurrentHashMap, ClusterPrivilege> CACHE = new ConcurrentHashMap<>(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java index a8627d21542..58ca42c7f68 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/RemoteClusterLicenseCheckerTests.java @@ -118,7 +118,7 @@ public final class RemoteClusterLicenseCheckerTests extends ESTestCase { responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); final RemoteClusterLicenseChecker licenseChecker = - new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); final AtomicReference licenseCheck = new AtomicReference<>(); 
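// (the checker's predicate receives the resolved License.OperationMode rather than the raw license info, so the test can pass the XPackLicenseState helper directly)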
licenseChecker.checkRemoteClusterLicenses( @@ -160,7 +160,7 @@ public final class RemoteClusterLicenseCheckerTests extends ESTestCase { }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); final RemoteClusterLicenseChecker licenseChecker = - new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); final AtomicReference licenseCheck = new AtomicReference<>(); licenseChecker.checkRemoteClusterLicenses( @@ -206,7 +206,7 @@ public final class RemoteClusterLicenseCheckerTests extends ESTestCase { responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); final RemoteClusterLicenseChecker licenseChecker = - new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); final AtomicReference exception = new AtomicReference<>(); licenseChecker.checkRemoteClusterLicenses( @@ -246,7 +246,7 @@ public final class RemoteClusterLicenseCheckerTests extends ESTestCase { }).when(client).execute(same(XPackInfoAction.INSTANCE), any(), any()); final RemoteClusterLicenseChecker licenseChecker = - new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); final List remoteClusterAliases = Collections.singletonList("valid"); licenseChecker.checkRemoteClusterLicenses( @@ -285,7 +285,7 @@ public final class RemoteClusterLicenseCheckerTests extends ESTestCase { responses.add(new XPackInfoResponse(null, createPlatinumLicenseResponse(), null)); final RemoteClusterLicenseChecker licenseChecker = - new RemoteClusterLicenseChecker(client, RemoteClusterLicenseChecker::isLicensePlatinumOrTrial); + new RemoteClusterLicenseChecker(client, XPackLicenseState::isPlatinumOrTrialOperationMode); final AtomicBoolean listenerInvoked = new AtomicBoolean(); threadPool.getThreadContext().putHeader("key", "value"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index e0029362003..70dca834c16 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -33,7 +33,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.TokenizerFactory; +import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.ingest.Processor; @@ -44,6 +46,7 @@ import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.plugins.DiscoveryPlugin; +import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.NetworkPlugin; @@ -70,6 +73,8 @@ import 
java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; +import java.util.Optional; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Predicate; @@ -80,7 +85,7 @@ import java.util.stream.Collectors; import static java.util.stream.Collectors.toList; public class LocalStateCompositeXPackPlugin extends XPackPlugin implements ScriptPlugin, ActionPlugin, IngestPlugin, NetworkPlugin, - ClusterPlugin, DiscoveryPlugin, MapperPlugin, AnalysisPlugin, PersistentTaskPlugin { + ClusterPlugin, DiscoveryPlugin, MapperPlugin, AnalysisPlugin, PersistentTaskPlugin, EnginePlugin { private XPackLicenseState licenseState; private SSLService sslService; @@ -391,6 +396,20 @@ public class LocalStateCompositeXPackPlugin extends XPackPlugin implements Scrip IOUtils.close(plugins); } + @Override + public Optional getEngineFactory(IndexSettings indexSettings) { + List> enginePlugins = filterPlugins(EnginePlugin.class).stream() + .map(p -> p.getEngineFactory(indexSettings)) + .collect(Collectors.toList()); + if (enginePlugins.size() == 0) { + return Optional.empty(); + } else if (enginePlugins.size() == 1) { + return enginePlugins.stream().findFirst().get(); + } else { + throw new IllegalStateException("Only one EngineFactory plugin allowed"); + } + } + private List filterPlugins(Class type) { return plugins.stream().filter(x -> type.isAssignableFrom(x.getClass())).map(p -> ((T)p)) .collect(Collectors.toList()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java index e71b0e5e8bd..d2f7d7bdb96 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/FieldSubsetReaderTests.java @@ -78,7 +78,7 @@ import static org.hamcrest.Matchers.equalTo; /** Simple tests for this filterreader */ public class FieldSubsetReaderTests extends ESTestCase { - + /** * test filtering two string fields */ @@ -86,16 +86,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); doc.add(new StringField("fieldB", "test", Field.Store.NO)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); Set seenFields = new HashSet<>(); @@ -105,11 +105,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertEquals(Collections.singleton("fieldA"), seenFields); assertNotNull(segmentReader.terms("fieldA")); assertNull(segmentReader.terms("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two int points */ @@ -181,25 +181,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); 
doc.add(new StoredField("fieldA", "testA")); doc.add(new StoredField("fieldB", "testB")); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals("testA", d2.get("fieldA")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (binary) */ @@ -207,25 +207,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", new BytesRef("testA"))); doc.add(new StoredField("fieldB", new BytesRef("testB"))); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(new BytesRef("testA"), d2.getBinaryValue("fieldA")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (int) */ @@ -233,25 +233,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", 1)); doc.add(new StoredField("fieldB", 2)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(1, d2.getField("fieldA").numericValue()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (long) */ @@ -259,25 +259,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", 1L)); doc.add(new StoredField("fieldB", 2L)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(1L, d2.getField("fieldA").numericValue()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (float) */ @@ -285,25 +285,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", 1F)); doc.add(new StoredField("fieldB", 2F)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(1F, 
d2.getField("fieldA").numericValue()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two stored fields (double) */ @@ -311,25 +311,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StoredField("fieldA", 1D)); doc.add(new StoredField("fieldB", 2D)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals(1D, d2.getField("fieldA").numericValue()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two vector fields */ @@ -337,7 +337,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); FieldType ft = new FieldType(StringField.TYPE_NOT_STORED); @@ -345,10 +345,10 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new Field("fieldA", "testA", ft)); doc.add(new Field("fieldB", "testB", ft)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field Fields vectors = ir.getTermVectors(0); Set seenFields = new HashSet<>(); @@ -356,11 +356,11 @@ public class FieldSubsetReaderTests extends ESTestCase { seenFields.add(field); } assertEquals(Collections.singleton("fieldA"), seenFields); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two text fields */ @@ -368,25 +368,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new TextField("fieldA", "test", Field.Store.NO)); doc.add(new TextField("fieldB", "test", Field.Store.NO)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); assertNotNull(segmentReader.getNormValues("fieldA")); assertNull(segmentReader.getNormValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two numeric dv fields */ @@ -394,16 +394,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new NumericDocValuesField("fieldA", 1)); doc.add(new NumericDocValuesField("fieldB", 2)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); NumericDocValues values = segmentReader.getNumericDocValues("fieldA"); @@ -411,11 +411,11 
@@ public class FieldSubsetReaderTests extends ESTestCase { assertTrue(values.advanceExact(0)); assertEquals(1, values.longValue()); assertNull(segmentReader.getNumericDocValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two binary dv fields */ @@ -423,16 +423,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new BinaryDocValuesField("fieldA", new BytesRef("testA"))); doc.add(new BinaryDocValuesField("fieldB", new BytesRef("testB"))); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); BinaryDocValues values = segmentReader.getBinaryDocValues("fieldA"); @@ -444,7 +444,7 @@ public class FieldSubsetReaderTests extends ESTestCase { TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two sorted dv fields */ @@ -452,16 +452,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new SortedDocValuesField("fieldA", new BytesRef("testA"))); doc.add(new SortedDocValuesField("fieldB", new BytesRef("testB"))); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); SortedDocValues values = segmentReader.getSortedDocValues("fieldA"); @@ -469,11 +469,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertTrue(values.advanceExact(0)); assertEquals(new BytesRef("testA"), values.binaryValue()); assertNull(segmentReader.getSortedDocValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two sortedset dv fields */ @@ -481,16 +481,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new SortedSetDocValuesField("fieldA", new BytesRef("testA"))); doc.add(new SortedSetDocValuesField("fieldB", new BytesRef("testB"))); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); SortedSetDocValues dv = segmentReader.getSortedSetDocValues("fieldA"); @@ -500,11 +500,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertEquals(SortedSetDocValues.NO_MORE_ORDS, dv.nextOrd()); assertEquals(new BytesRef("testA"), dv.lookupOrd(0)); assertNull(segmentReader.getSortedSetDocValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering two sortednumeric dv fields */ @@ -512,16 +512,16 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); 
IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new SortedNumericDocValuesField("fieldA", 1)); doc.add(new SortedNumericDocValuesField("fieldB", 2)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); SortedNumericDocValues dv = segmentReader.getSortedNumericDocValues("fieldA"); @@ -530,11 +530,11 @@ public class FieldSubsetReaderTests extends ESTestCase { assertEquals(1, dv.docValueCount()); assertEquals(1, dv.nextValue()); assertNull(segmentReader.getSortedNumericDocValues("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test we have correct fieldinfos metadata */ @@ -542,27 +542,27 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); doc.add(new StringField("fieldB", "test", Field.Store.NO)); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); FieldInfos infos = segmentReader.getFieldInfos(); assertEquals(1, infos.size()); assertNotNull(infos.fieldInfo("fieldA")); assertNull(infos.fieldInfo("fieldB")); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test special handling for _source field. 
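* (the stored _source is parsed and re-serialized so that only permitted fields remain visible to the consumer)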
*/ @@ -570,7 +570,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "testA", Field.Store.NO)); @@ -578,16 +578,16 @@ public class FieldSubsetReaderTests extends ESTestCase { byte bytes[] = "{\"fieldA\":\"testA\", \"fieldB\":\"testB\"}".getBytes(StandardCharsets.UTF_8); doc.add(new StoredField(SourceFieldMapper.NAME, bytes, 0, bytes.length)); iw.addDocument(doc); - + // open reader Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", SourceFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only one field Document d2 = ir.document(0); assertEquals(1, d2.getFields().size()); assertEquals("{\"fieldA\":\"testA\"}", d2.getBinaryValue(SourceFieldMapper.NAME).utf8ToString()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } @@ -741,7 +741,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); @@ -749,37 +749,37 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldA", Field.Store.NO)); doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); iw.addDocument(doc); - + // open reader Set fields = new HashSet<>(); fields.add("fieldA"); Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", FieldNamesFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); TermsEnum termsEnum = terms.iterator(); assertEquals(new BytesRef("fieldA"), termsEnum.next()); assertNull(termsEnum.next()); - - // seekExact + + // seekExact termsEnum = terms.iterator(); assertTrue(termsEnum.seekExact(new BytesRef("fieldA"))); assertFalse(termsEnum.seekExact(new BytesRef("fieldB"))); - - // seekCeil + + // seekCeil termsEnum = terms.iterator(); assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("fieldA"))); assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("field0000"))); assertEquals(new BytesRef("fieldA"), termsEnum.term()); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldAAA"))); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldB"))); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test special handling for _field_names field (three fields, to exercise termsenum better) */ @@ -787,7 +787,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); @@ -797,11 +797,11 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldC", 
Field.Store.NO)); iw.addDocument(doc); - + // open reader Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", "fieldC", FieldNamesFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only two fields LeafReader segmentReader = ir.leaves().get(0).reader(); Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); @@ -809,24 +809,24 @@ public class FieldSubsetReaderTests extends ESTestCase { assertEquals(new BytesRef("fieldA"), termsEnum.next()); assertEquals(new BytesRef("fieldC"), termsEnum.next()); assertNull(termsEnum.next()); - - // seekExact + + // seekExact termsEnum = terms.iterator(); assertTrue(termsEnum.seekExact(new BytesRef("fieldA"))); assertFalse(termsEnum.seekExact(new BytesRef("fieldB"))); assertTrue(termsEnum.seekExact(new BytesRef("fieldC"))); - - // seekCeil + + // seekCeil termsEnum = terms.iterator(); assertEquals(SeekStatus.FOUND, termsEnum.seekCeil(new BytesRef("fieldA"))); assertEquals(SeekStatus.NOT_FOUND, termsEnum.seekCeil(new BytesRef("fieldB"))); assertEquals(new BytesRef("fieldC"), termsEnum.term()); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldD"))); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test _field_names where a field is permitted, but doesn't exist in the segment. */ @@ -834,7 +834,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); @@ -842,27 +842,27 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldA", Field.Store.NO)); doc.add(new StringField(FieldNamesFieldMapper.NAME, "fieldB", Field.Store.NO)); iw.addDocument(doc); - + // open reader Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", "fieldC", FieldNamesFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); Terms terms = segmentReader.terms(FieldNamesFieldMapper.NAME); - - // seekExact + + // seekExact TermsEnum termsEnum = terms.iterator(); assertFalse(termsEnum.seekExact(new BytesRef("fieldC"))); - - // seekCeil + + // seekCeil termsEnum = terms.iterator(); assertEquals(SeekStatus.END, termsEnum.seekCeil(new BytesRef("fieldC"))); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test where _field_names does not exist */ @@ -870,25 +870,25 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); doc.add(new StringField("fieldA", "test", Field.Store.NO)); doc.add(new StringField("fieldB", "test", Field.Store.NO)); iw.addDocument(doc); - + // open reader Automaton automaton = Automatons.patterns(Arrays.asList("fieldA", SourceFieldMapper.NAME)); DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(automaton)); - + // see only one field LeafReader segmentReader = ir.leaves().get(0).reader(); assertNull(segmentReader.terms(FieldNamesFieldMapper.NAME)); - + TestUtil.checkReader(ir); IOUtils.close(ir, 
iw, dir); } - + /** test that core cache key (needed for NRT) is working */ public void testCoreCacheKey() throws Exception { Directory dir = newDirectory(); @@ -896,7 +896,7 @@ public class FieldSubsetReaderTests extends ESTestCase { iwc.setMaxBufferedDocs(100); iwc.setMergePolicy(NoMergePolicy.INSTANCE); IndexWriter iw = new IndexWriter(dir, iwc); - + // add two docs, id:0 and id:1 Document doc = new Document(); Field idField = new StringField("id", "", Field.Store.NO); @@ -905,7 +905,7 @@ public class FieldSubsetReaderTests extends ESTestCase { iw.addDocument(doc); idField.setStringValue("1"); iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("id"))); assertEquals(2, ir.numDocs()); @@ -914,17 +914,17 @@ public class FieldSubsetReaderTests extends ESTestCase { // delete id:0 and reopen iw.deleteDocuments(new Term("id", "0")); DirectoryReader ir2 = DirectoryReader.openIfChanged(ir); - + // we should have the same cache key as before assertEquals(1, ir2.numDocs()); assertEquals(1, ir2.leaves().size()); assertSame(ir.leaves().get(0).reader().getCoreCacheHelper().getKey(), ir2.leaves().get(0).reader().getCoreCacheHelper().getKey()); - + TestUtil.checkReader(ir); IOUtils.close(ir, ir2, iw, dir); } - + /** * test filtering the only vector fields */ @@ -932,7 +932,7 @@ public class FieldSubsetReaderTests extends ESTestCase { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); - + // add document with 2 fields Document doc = new Document(); FieldType ft = new FieldType(StringField.TYPE_NOT_STORED); @@ -940,17 +940,17 @@ public class FieldSubsetReaderTests extends ESTestCase { doc.add(new Field("fieldA", "testA", ft)); doc.add(new StringField("fieldB", "testB", Field.Store.NO)); // no vectors iw.addDocument(doc); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldB"))); - + // sees no fields assertNull(ir.getTermVectors(0)); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } - + /** * test filtering an index with no fields */ @@ -959,10 +959,10 @@ public class FieldSubsetReaderTests extends ESTestCase { IndexWriterConfig iwc = new IndexWriterConfig(null); IndexWriter iw = new IndexWriter(dir, iwc); iw.addDocument(new Document()); - + // open reader DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw), new CharacterRunAutomaton(Automata.makeString("fieldA"))); - + // see no fields LeafReader segmentReader = ir.leaves().get(0).reader(); Set seenFields = new HashSet<>(); @@ -971,14 +971,14 @@ public class FieldSubsetReaderTests extends ESTestCase { } assertEquals(0, seenFields.size()); assertNull(segmentReader.terms("foo")); - + // see no vectors assertNull(segmentReader.getTermVectors(0)); - + // see no stored fields Document document = segmentReader.document(0); assertEquals(0, document.getFields().size()); - + TestUtil.checkReader(ir); IOUtils.close(ir, iw, dir); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java index 6857a48784b..c4c95211d4c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/PrivilegeTests.java @@ -127,4 +127,13 @@ public class PrivilegeTests extends ESTestCase { assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync[p]"), is(true)); assertThat(predicate.test("indices:admin/seq_no/global_checkpoint_sync[r]"), is(true)); } + + public void testManageCcrPrivilege() { + Predicate predicate = ClusterPrivilege.MANAGE_CCR.predicate(); + assertThat(predicate.test("cluster:admin/xpack/ccr/follow_index"), is(true)); + assertThat(predicate.test("cluster:admin/xpack/ccr/unfollow_index"), is(true)); + assertThat(predicate.test("cluster:admin/xpack/ccr/brand_new_api"), is(true)); + assertThat(predicate.test("cluster:admin/xpack/whatever"), is(false)); + } + } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index d6ebdd0449e..f3f4a771443 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -144,7 +144,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction client().prepareSearch() diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json new file mode 100644 index 00000000000..46ff872a1a4 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.create_and_follow_index.json @@ -0,0 +1,21 @@ +{ + "ccr.create_and_follow_index": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ccr/create_and_follow", + "paths": [ "/{index}/_ccr/create_and_follow" ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "The name of the follower index" + } + } + }, + "body": { + "description" : "The name of the leader index and other optional ccr related parameters", + "required" : true + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json new file mode 100644 index 00000000000..749aae48d91 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json @@ -0,0 +1,21 @@ +{ + "ccr.follow_index": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_ccr/follow", + "paths": [ "/{index}/_ccr/follow" ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "The name of the follower index." 
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json
new file mode 100644
index 00000000000..749aae48d91
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.follow_index.json
@@ -0,0 +1,21 @@
+{
+  "ccr.follow_index": {
+    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current",
+    "methods": [ "POST" ],
+    "url": {
+      "path": "/{index}/_ccr/follow",
+      "paths": [ "/{index}/_ccr/follow" ],
+      "parts": {
+        "index": {
+          "type": "string",
+          "required": true,
+          "description": "The name of the follower index."
+        }
+      }
+    },
+    "body": {
+      "description" : "The name of the leader index and other optional ccr related parameters",
+      "required" : true
+    }
+  }
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json
new file mode 100644
index 00000000000..7f5cda09f25
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.stats.json
@@ -0,0 +1,16 @@
+{
+  "ccr.stats": {
+    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current",
+    "methods": [ "GET" ],
+    "url": {
+      "path": "/_ccr/stats",
+      "paths": [ "/_ccr/stats", "/_ccr/stats/{index}" ],
+      "parts": {
+        "index": {
+          "type": "list",
+          "description": "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+        }
+      }
+    }
+  }
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json
new file mode 100644
index 00000000000..5e9a111496a
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/ccr.unfollow_index.json
@@ -0,0 +1,17 @@
+{
+  "ccr.unfollow_index": {
+    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/current",
+    "methods": [ "POST" ],
+    "url": {
+      "path": "/{index}/_ccr/unfollow",
+      "paths": [ "/{index}/_ccr/unfollow" ],
+      "parts": {
+        "index": {
+          "type": "string",
+          "required": true,
+          "description": "The name of the follower index that should stop following its leader index."
+        }
+      }
+    }
+  }
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml
new file mode 100644
index 00000000000..6c95f307c25
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/follow_and_unfollow.yml
@@ -0,0 +1,42 @@
+---
+"Test follow and unfollow an existing index":
+  - do:
+      indices.create:
+        index: foo
+        body:
+          settings:
+            index:
+              soft_deletes:
+                enabled: true
+          mappings:
+            doc:
+              properties:
+                field:
+                  type: keyword
+  - is_true: acknowledged
+
+  - do:
+      ccr.create_and_follow_index:
+        index: bar
+        body:
+          leader_index: foo
+  - is_true: follow_index_created
+  - is_true: follow_index_shards_acked
+  - is_true: index_following_started
+
+  - do:
+      ccr.unfollow_index:
+        index: bar
+  - is_true: acknowledged
+
+  - do:
+      ccr.follow_index:
+        index: bar
+        body:
+          leader_index: foo
+  - is_true: acknowledged
+
+  - do:
+      ccr.unfollow_index:
+        index: bar
+  - is_true: acknowledged
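Per the ccr.stats spec earlier in this patch, the stats response is keyed by follower index with one stats object per shard, and the stats.yml test that follows asserts on exactly that shape. A hedged sketch of reading it through the same low-level client helpers (the index name is illustrative):

    // GET /_ccr/stats/{index}; one entry per follower shard under the index key.
    final Request statsRequest = new Request("GET", "/_ccr/stats/bar");
    final Map<String, Object> statsResponse = entityAsMap(client().performRequest(statsRequest));
    // Each per-shard entry carries leader_index, shard_id, checkpoints and
    // fetch/bulk counters, matching the assertions in stats.yml below.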
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml
new file mode 100644
index 00000000000..431629b1d23
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/ccr/stats.yml
@@ -0,0 +1,57 @@
+---
+"Test stats":
+  - do:
+      indices.create:
+        index: foo
+        body:
+          settings:
+            index:
+              soft_deletes:
+                enabled: true
+          mappings:
+            doc:
+              properties:
+                field:
+                  type: keyword
+
+  - do:
+      ccr.create_and_follow_index:
+        index: bar
+        body:
+          leader_index: foo
+  - is_true: follow_index_created
+  - is_true: follow_index_shards_acked
+  - is_true: index_following_started
+
+  # we can not reliably wait for replication to occur so we test the endpoint without indexing any documents
+  - do:
+      ccr.stats:
+        index: bar
+  - match: { bar.0.leader_index: "foo" }
+  - match: { bar.0.shard_id: 0 }
+  - gte: { bar.0.leader_global_checkpoint: -1 }
+  - gte: { bar.0.leader_max_seq_no: -1 }
+  - gte: { bar.0.follower_global_checkpoint: -1 }
+  - gte: { bar.0.follower_max_seq_no: -1 }
+  - gte: { bar.0.last_requested_seq_no: -1 }
+  - gte: { bar.0.number_of_concurrent_reads: 0 }
+  - match: { bar.0.number_of_concurrent_writes: 0 }
+  - match: { bar.0.number_of_queued_writes: 0 }
+  - gte: { bar.0.mapping_version: 0 }
+  - gte: { bar.0.total_fetch_time_millis: 0 }
+  - gte: { bar.0.number_of_successful_fetches: 0 }
+  - gte: { bar.0.number_of_failed_fetches: 0 }
+  - match: { bar.0.operations_received: 0 }
+  - match: { bar.0.total_transferred_bytes: 0 }
+  - match: { bar.0.total_index_time_millis: 0 }
+  - match: { bar.0.number_of_successful_bulk_operations: 0 }
+  - match: { bar.0.number_of_failed_bulk_operations: 0 }
+  - match: { bar.0.number_of_operations_indexed: 0 }
+  - length: { bar.0.fetch_exceptions: 0 }
+  - gte: { bar.0.time_since_last_fetch_millis: -1 }
+
+  - do:
+      ccr.unfollow_index:
+        index: bar
+  - is_true: acknowledged
+
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml
index a33fcdb5297..7a22ad322bf 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/watcher/usage/10_basic.yml
@@ -1,6 +1,8 @@
 ---
 "Test watcher usage stats output":
-
+  - skip:
+      version: "all"
+      reason: AwaitsFix at https://github.com/elastic/elasticsearch/issues/33326
   - do:
       catch: missing
       xpack.watcher.delete_watch:
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml
index fe48a6ba62d..410250ef599 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/10_basic.yml
@@ -10,6 +10,7 @@
   - do:
       nodes.info: {}
 
+  - contains: { nodes.$master.modules: { name: x-pack-ccr } }
   - contains: { nodes.$master.modules: { name: x-pack-core } }
   - contains: { nodes.$master.modules: { name: x-pack-deprecation } }
   - contains: { nodes.$master.modules: { name: x-pack-graph } }
diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
index 6ead87aba61..7c4eda37d2f 100644
--- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
+++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
@@ -325,7 +325,6 @@ public class FullClusterRestartIT extends ESRestTestCase {
         }
     }
 
-    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/32773")
     public void testRollupIDSchemeAfterRestart() throws Exception {
         assumeTrue("Rollup can be tested with 6.3.0 and onwards", oldClusterVersion.onOrAfter(Version.V_6_3_0));
         assumeTrue("Rollup ID scheme changed in 6.4", oldClusterVersion.before(Version.V_6_4_0));
@@ -393,6 +392,8 @@ public class FullClusterRestartIT extends ESRestTestCase {
             indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-02T00:00:01\",\"value\":345}");
             client().performRequest(indexRequest);
 
+            assertRollUpJob("rollup-id-test");
+
             // stop the rollup job to force a state save, which will upgrade the ID
             final Request stopRollupJobRequest = new Request("POST", "_xpack/rollup/job/rollup-id-test/_stop");
             Map<String, Object> stopRollupJobResponse = entityAsMap(client().performRequest(stopRollupJobRequest));
diff --git a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
index 538d54416bf..17fbf0769fd 100644
--- a/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
+++ b/x-pack/qa/smoke-test-watcher-with-security/src/test/java/org/elasticsearch/smoketest/SmokeTestWatcherWithSecurityIT.java
@@ -135,6 +135,7 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase {
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320")
     public void testSearchInputHasPermissions() throws Exception {
         try (XContentBuilder builder = jsonBuilder()) {
             builder.startObject();
@@ -185,6 +186,7 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase {
         assertThat(conditionMet, is(false));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320")
     public void testSearchTransformHasPermissions() throws Exception {
         try (XContentBuilder builder = jsonBuilder()) {
             builder.startObject();
@@ -242,6 +244,7 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase {
         assertThat(response.getStatusLine().getStatusCode(), is(404));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30777")
     public void testIndexActionHasPermissions() throws Exception {
         try (XContentBuilder builder = jsonBuilder()) {
             builder.startObject();
@@ -266,6 +269,7 @@ public class SmokeTestWatcherWithSecurityIT extends ESRestTestCase {
         assertThat(spam, is("eggs"));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/33320")
    public void testIndexActionInsufficientPrivileges() throws Exception {
        try (XContentBuilder builder = jsonBuilder()) {
            builder.startObject();